added vendors

This commit is contained in:
Paul 2020-03-02 23:30:09 +01:00
parent 63719c9c19
commit 1713941d93
88 changed files with 17511 additions and 0 deletions

3
vendor/github.com/korylprince/go-ad-auth/.gitignore generated vendored Normal file

@@ -0,0 +1,3 @@
*.swp
.env
tags

9
vendor/github.com/korylprince/go-ad-auth/LICENSE generated vendored Normal file

@@ -0,0 +1,9 @@
MIT License
Copyright (c) 2018 Kory Prince
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

78
vendor/github.com/korylprince/go-ad-auth/README.md generated vendored Normal file

@@ -0,0 +1,78 @@
[![GoDoc](https://godoc.org/gopkg.in/korylprince/go-ad-auth.v2?status.svg)](https://godoc.org/gopkg.in/korylprince/go-ad-auth.v2)
# About
`go-ad-auth` is a simple wrapper around the great [ldap](https://github.com/go-ldap/ldap) library to help with Active Directory authentication.
# Installing
`go get gopkg.in/korylprince/go-ad-auth.v2`
**Dependencies:**
* [github.com/go-ldap/ldap](https://github.com/go-ldap/ldap)
* [golang.org/x/text/encoding/unicode](https://godoc.org/golang.org/x/text/encoding/unicode)
If you have any issues or questions [create an issue](https://github.com/korylprince/go-ad-auth/issues).
# New API
The `v2` API is almost a complete rewrite of the older [`gopkg.in/korylprince/go-ad-auth.v1`](https://godoc.org/gopkg.in/korylprince/go-ad-auth.v1) API. There are similarities, but `v2` is not backwards-compatible.
The new API is cleaner, more idiomatic, exposes a lot more functionality and is fully testable.
One notable difference to be careful of: while `v1`'s `Login` returns `false` if the user is not in the specified group, `v2`'s `AuthenticateExtended` returns `true` if the user authenticated successfully, regardless of whether they are in any of the specified groups.
# Usage
`godoc gopkg.in/korylprince/go-ad-auth.v2`
Example:
```go
config := &auth.Config{
	Server:   "ldap.example.com",
	Port:     389,
	BaseDN:   "OU=Users,DC=example,DC=com",
	Security: auth.SecurityStartTLS,
}

username := "user"
password := "pass"

status, err := auth.Authenticate(config, username, password)

if err != nil {
	//handle err
	return
}

if !status {
	//handle failed authentication
	return
}
```
See more examples on [GoDoc](https://godoc.org/gopkg.in/korylprince/go-ad-auth.v2).
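As a further illustration, here is a minimal sketch (not taken from the package's own docs) that builds on the `config`, `username`, and `password` above and uses `AuthenticateExtended` to check group membership explicitly; the attribute name and group DN are placeholders:

```go
status, entry, userGroups, err := auth.AuthenticateExtended(
	config, username, password,
	[]string{"displayName"}, //attributes to return (placeholder)
	[]string{"CN=Admins,OU=Groups,DC=example,DC=com"}, //group to check (placeholder)
)
if err != nil {
	//handle err
	return
}
if !status {
	//handle failed authentication
	return
}
if len(userGroups) == 0 {
	//authenticated, but not a member of the required group
	return
}
//entry holds the user's DN and requested attributes
displayName := entry.GetAttributeValue("displayName")
_ = displayName
```

Unlike `v1`'s `Login`, the group check is explicit here: `status` only reflects the bind result.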
# Testing
`go test -v`
Most tests will be skipped unless you supply the following environment variables to connect to an Active Directory server:
| Name | Description |
| ----------------------- | ------------- |
| ADTEST_SERVER | Hostname or IP Address of an Active Directory server |
| ADTEST_PORT | Port to use - defaults to 389 |
| ADTEST_BIND_UPN | userPrincipalName (user@domain.tld) of admin user |
| ADTEST_BIND_PASS | Password of admin user |
| ADTEST_BIND_SECURITY | `NONE` \|\| `TLS` \|\| `STARTTLS` - defaults to `STARTTLS` |
| ADTEST_BASEDN | LDAP Base DN - for testing the root DN is recommended, e.g. `DC=example,DC=com` |
| ADTEST_PASSWORD_UPN | userPrincipalName of a test user that will be used to test password changing functions |
# Security
[SQL Injection](https://en.wikipedia.org/wiki/SQL_injection) is a well-known attack vector, and most SQL libraries provide mitigations such as [prepared statements](https://en.wikipedia.org/wiki/Prepared_statement). Similarly, [LDAP Injection](https://www.owasp.org/index.php/Testing_for_LDAP_Injection_\(OTG-INPVAL-006\)), while not often seen in the wild, is something we should be concerned with.
Since `v2.2.0`, this library sanitizes inputs (with [`ldap.EscapeFilter`](https://godoc.org/gopkg.in/ldap.v3#EscapeFilter)) that are used to create LDAP filters in library functions, namely [`GetDN`](https://godoc.org/gopkg.in/korylprince/go-ad-auth.v2#Conn.GetDN) and [`GetAttributes`](https://godoc.org/gopkg.in/korylprince/go-ad-auth.v2#Conn.GetAttributes). This means high level functions in this library are protected against malicious inputs. If you use [`Search`](https://godoc.org/gopkg.in/korylprince/go-ad-auth.v2#Conn.Search) or [`SearchOne`](https://godoc.org/gopkg.in/korylprince/go-ad-auth.v2#Conn.SearchOne), take care to sanitize any untrusted inputs you use in your LDAP filter.
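As a rough illustration (assuming `conn` is a `*auth.Conn` obtained from `config.Connect()`, with `fmt` and the `ldap` package imported), untrusted input can be escaped with `ldap.EscapeFilter` before it is interpolated into a filter passed to `Search`:

```go
//untrusted user input
name := `Rob*ert (admin)`

//escape the input before building the LDAP filter
filter := fmt.Sprintf("(&(objectClass=user)(cn=%s))", ldap.EscapeFilter(name))

entries, err := conn.Search(filter, []string{"distinguishedName"}, 100)
if err != nil {
	//handle err
	return
}
_ = entries
```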

94
vendor/github.com/korylprince/go-ad-auth/auth.go generated vendored Normal file

@@ -0,0 +1,94 @@
package auth
import ldap "gopkg.in/ldap.v3"
//Authenticate checks if the given credentials are valid, or returns an error if one occurred.
//username may be either the sAMAccountName or the userPrincipalName.
func Authenticate(config *Config, username, password string) (bool, error) {
upn, err := config.UPN(username)
if err != nil {
return false, err
}
conn, err := config.Connect()
if err != nil {
return false, err
}
defer conn.Conn.Close()
return conn.Bind(upn, password)
}
//AuthenticateExtended checks if the given credentials are valid, or returns an error if one occurred.
//username may be either the sAMAccountName or the userPrincipalName.
//entry is the *ldap.Entry that holds the DN and any request attributes of the user.
//If groups is non-empty, userGroups will hold which of those groups the user is a member of.
//groups can be a list of groups referenced by DN or cn and the format provided will be the format returned.
func AuthenticateExtended(config *Config, username, password string, attrs, groups []string) (status bool, entry *ldap.Entry, userGroups []string, err error) {
upn, err := config.UPN(username)
if err != nil {
return false, nil, nil, err
}
conn, err := config.Connect()
if err != nil {
return false, nil, nil, err
}
defer conn.Conn.Close()
//bind
status, err = conn.Bind(upn, password)
if err != nil {
return false, nil, nil, err
}
if !status {
return false, nil, nil, nil
}
//add memberOf attribute if necessary
memberOfPresent := false
for _, a := range attrs {
if a == "memberOf" {
memberOfPresent = true
break
}
}
if !memberOfPresent && len(groups) > 0 {
attrs = append(attrs, "memberOf")
}
//get entry
entry, err = conn.GetAttributes("userPrincipalName", upn, attrs)
if err != nil {
return false, nil, nil, err
}
if len(groups) > 0 {
for _, group := range groups {
groupDN, err := conn.GroupDN(group)
if err != nil {
return false, nil, nil, err
}
for _, userGroup := range entry.GetAttributeValues("memberOf") {
if userGroup == groupDN {
userGroups = append(userGroups, group)
break
}
}
}
}
//remove memberOf if it wasn't requested
if !memberOfPresent && len(groups) > 0 {
var entryAttrs []*ldap.EntryAttribute
for _, e := range entry.Attributes {
if e.Name != "memberOf" {
entryAttrs = append(entryAttrs, e)
}
}
entry.Attributes = entryAttrs
}
return status, entry, userGroups, nil
}

54
vendor/github.com/korylprince/go-ad-auth/config.go generated vendored Normal file

@@ -0,0 +1,54 @@
package auth
import (
"errors"
"fmt"
"net/mail"
"strings"
)
//SecurityType specifies the type of security to use when connecting to an Active Directory Server.
type SecurityType int
//Security will default to SecurityNone if not given.
const (
SecurityNone SecurityType = iota
SecurityTLS
SecurityStartTLS
)
//Config contains settings for connecting to an Active Directory server.
type Config struct {
Server string
Port int
BaseDN string
Security SecurityType
}
//Domain returns the domain derived from BaseDN or an error if misconfigured.
func (c *Config) Domain() (string, error) {
domain := ""
for _, v := range strings.Split(strings.ToLower(c.BaseDN), ",") {
if trimmed := strings.TrimSpace(v); strings.HasPrefix(trimmed, "dc=") {
domain = domain + "." + trimmed[3:]
}
}
if len(domain) <= 1 {
return "", errors.New("Configuration error: invalid BaseDN")
}
return domain[1:], nil
}
//UPN returns the userPrincipalName for the given username or an error if misconfigured.
func (c *Config) UPN(username string) (string, error) {
if _, err := mail.ParseAddress(username); err == nil {
return username, nil
}
domain, err := c.Domain()
if err != nil {
return "", err
}
return fmt.Sprintf("%s@%s", username, domain), nil
}

65
vendor/github.com/korylprince/go-ad-auth/conn.go generated vendored Normal file

@@ -0,0 +1,65 @@
package auth
import (
"crypto/tls"
"errors"
"fmt"
ldap "gopkg.in/ldap.v3"
)
//Conn represents an Active Directory connection.
type Conn struct {
Conn *ldap.Conn
Config *Config
}
//Connect returns an open connection to an Active Directory server or an error if one occurred.
func (c *Config) Connect() (*Conn, error) {
switch c.Security {
case SecurityNone:
conn, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", c.Server, c.Port))
if err != nil {
return nil, fmt.Errorf("Connection error: %v", err)
}
return &Conn{Conn: conn, Config: c}, nil
case SecurityTLS:
conn, err := ldap.DialTLS("tcp", fmt.Sprintf("%s:%d", c.Server, c.Port), &tls.Config{ServerName: c.Server})
if err != nil {
return nil, fmt.Errorf("Connection error: %v", err)
}
return &Conn{Conn: conn, Config: c}, nil
case SecurityStartTLS:
conn, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", c.Server, c.Port))
if err != nil {
return nil, fmt.Errorf("Connection error: %v", err)
}
err = conn.StartTLS(&tls.Config{ServerName: c.Server})
if err != nil {
return nil, fmt.Errorf("Connection error: %v", err)
}
return &Conn{Conn: conn, Config: c}, nil
default:
return nil, errors.New("Configuration error: invalid SecurityType")
}
}
//Bind authenticates the connection with the given userPrincipalName and password
//and returns the result or an error if one occurred.
func (c *Conn) Bind(upn, password string) (bool, error) {
if password == "" {
return false, nil
}
err := c.Conn.Bind(upn, password)
if err != nil {
if e, ok := err.(*ldap.Error); ok {
if e.ResultCode == ldap.LDAPResultInvalidCredentials {
return false, nil
}
}
return false, fmt.Errorf("Bind error (%s): %v", upn, err)
}
return true, nil
}

35
vendor/github.com/korylprince/go-ad-auth/group.go generated vendored Normal file

@@ -0,0 +1,35 @@
package auth
import (
"strings"
)
//GroupDN returns the DN of the group with the given cn or an error if one occurred.
func (c *Conn) GroupDN(group string) (string, error) {
if strings.HasSuffix(group, c.Config.BaseDN) {
return group, nil
}
return c.GetDN("cn", group)
}
//ObjectGroups returns which of the given groups (referenced by DN) the object with the given attribute value is in,
//if any, or an error if one occurred.
func (c *Conn) ObjectGroups(attr, value string, groups []string) ([]string, error) {
entry, err := c.GetAttributes(attr, value, []string{"memberOf"})
if err != nil {
return nil, err
}
var objectGroups []string
for _, objectGroup := range entry.GetAttributeValues("memberOf") {
for _, parentGroup := range groups {
if objectGroup == parentGroup {
objectGroups = append(objectGroups, parentGroup)
continue
}
}
}
return objectGroups, nil
}

80
vendor/github.com/korylprince/go-ad-auth/passwd.go generated vendored Normal file

@@ -0,0 +1,80 @@
package auth
import (
"errors"
"fmt"
"golang.org/x/text/encoding/unicode"
ldap "gopkg.in/ldap.v3"
)
//ModifyDNPassword sets a new password for the given user or returns an error if one occurred.
//ModifyDNPassword is used for resetting user passwords using administrative privileges.
func (c *Conn) ModifyDNPassword(dn, newPasswd string) error {
utf16 := unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)
encoded, err := utf16.NewEncoder().String(fmt.Sprintf(`"%s"`, newPasswd))
if err != nil {
return fmt.Errorf("Password error: Unable to encode password: %v", err)
}
req := ldap.NewModifyRequest(dn, nil)
req.Replace("unicodePwd", []string{encoded})
err = c.Conn.Modify(req)
if err != nil {
return fmt.Errorf("Password error: Unable to modify password: %v", err)
}
return nil
}
//UpdatePassword checks if the given credentials are valid and updates the password if they are,
//or returns an error if one occurred. UpdatePassword is used for users resetting their own password.
func UpdatePassword(config *Config, username, oldPasswd, newPasswd string) error {
utf16 := unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)
oldEncoded, err := utf16.NewEncoder().String(fmt.Sprintf(`"%s"`, oldPasswd))
if err != nil {
return fmt.Errorf("Password error: Unable to encode old password: %v", err)
}
newEncoded, err := utf16.NewEncoder().String(fmt.Sprintf(`"%s"`, newPasswd))
if err != nil {
return fmt.Errorf("Password error: Unable to encode new password: %v", err)
}
upn, err := config.UPN(username)
if err != nil {
return err
}
conn, err := config.Connect()
if err != nil {
return err
}
defer conn.Conn.Close()
//bind
status, err := conn.Bind(upn, oldPasswd)
if err != nil {
return err
}
if !status {
return errors.New("Password error: credentials not valid")
}
dn, err := conn.GetDN("userPrincipalName", upn)
if err != nil {
return err
}
req := ldap.NewModifyRequest(dn, nil)
req.Delete("unicodePwd", []string{oldEncoded})
req.Add("unicodePwd", []string{newEncoded})
err = conn.Conn.Modify(req)
if err != nil {
return fmt.Errorf("Password error: Unable to modify password: %v", err)
}
return nil
}

78
vendor/github.com/korylprince/go-ad-auth/search.go generated vendored Normal file

@@ -0,0 +1,78 @@
package auth
import (
"fmt"
ldap "gopkg.in/ldap.v3"
)
//Search returns the entries for the given search criteria or an error if one occurred.
func (c *Conn) Search(filter string, attrs []string, sizeLimit int) ([]*ldap.Entry, error) {
search := ldap.NewSearchRequest(
c.Config.BaseDN,
ldap.ScopeWholeSubtree,
ldap.DerefAlways,
sizeLimit,
0,
false,
filter,
attrs,
nil,
)
result, err := c.Conn.Search(search)
if err != nil {
return nil, fmt.Errorf(`Search error "%s": %v`, filter, err)
}
return result.Entries, nil
}
//SearchOne returns the single entry for the given search criteria or an error if one occurred.
//An error is returned unless exactly one entry is returned.
func (c *Conn) SearchOne(filter string, attrs []string) (*ldap.Entry, error) {
search := ldap.NewSearchRequest(
c.Config.BaseDN,
ldap.ScopeWholeSubtree,
ldap.DerefAlways,
1,
0,
false,
filter,
attrs,
nil,
)
result, err := c.Conn.Search(search)
if err != nil {
if e, ok := err.(*ldap.Error); ok {
if e.ResultCode == ldap.LDAPResultSizeLimitExceeded {
return nil, fmt.Errorf(`Search error "%s": more than one entry returned`, filter)
}
}
return nil, fmt.Errorf(`Search error "%s": %v`, filter, err)
}
if len(result.Entries) == 0 {
return nil, fmt.Errorf(`Search error "%s": no entries returned`, filter)
}
return result.Entries[0], nil
}
//GetDN returns the DN for the object with the given attribute value or an error if one occurred.
//attr and value are sanitized.
func (c *Conn) GetDN(attr, value string) (string, error) {
entry, err := c.SearchOne(fmt.Sprintf("(%s=%s)", ldap.EscapeFilter(attr), ldap.EscapeFilter(value)), nil)
if err != nil {
return "", err
}
return entry.DN, nil
}
//GetAttributes returns the *ldap.Entry with the given attributes for the object with the given attribute value or an error if one occurred.
//attr and value are sanitized.
func (c *Conn) GetAttributes(attr, value string, attrs []string) (*ldap.Entry, error) {
return c.SearchOne(fmt.Sprintf("(%s=%s)", ldap.EscapeFilter(attr), ldap.EscapeFilter(value)), attrs)
}

3
vendor/golang.org/x/text/AUTHORS generated vendored Normal file

@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

3
vendor/golang.org/x/text/CONTRIBUTORS generated vendored Normal file

@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

27
vendor/golang.org/x/text/LICENSE generated vendored Normal file

@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/text/PATENTS generated vendored Normal file

@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

335
vendor/golang.org/x/text/encoding/encoding.go generated vendored Normal file

@@ -0,0 +1,335 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package encoding defines an interface for character encodings, such as Shift
// JIS and Windows 1252, that can convert to and from UTF-8.
//
// Encoding implementations are provided in other packages, such as
// golang.org/x/text/encoding/charmap and
// golang.org/x/text/encoding/japanese.
package encoding // import "golang.org/x/text/encoding"
import (
"errors"
"io"
"strconv"
"unicode/utf8"
"golang.org/x/text/encoding/internal/identifier"
"golang.org/x/text/transform"
)
// TODO:
// - There seems to be some inconsistency in when decoders return errors
// and when not. Also documentation seems to suggest they shouldn't return
// errors at all (except for UTF-16).
// - Encoders seem to rely on or at least benefit from the input being in NFC
// normal form. Perhaps add an example how users could prepare their output.
// Encoding is a character set encoding that can be transformed to and from
// UTF-8.
type Encoding interface {
// NewDecoder returns a Decoder.
NewDecoder() *Decoder
// NewEncoder returns an Encoder.
NewEncoder() *Encoder
}
// A Decoder converts bytes to UTF-8. It implements transform.Transformer.
//
// Transforming source bytes that are not of that encoding will not result in an
// error per se. Each byte that cannot be transcoded will be represented in the
// output by the UTF-8 encoding of '\uFFFD', the replacement rune.
type Decoder struct {
transform.Transformer
// This forces external creators of Decoders to use names in struct
// initializers, allowing for future extendibility without having to break
// code.
_ struct{}
}
// Bytes converts the given encoded bytes to UTF-8. It returns the converted
// bytes or nil, err if any error occurred.
func (d *Decoder) Bytes(b []byte) ([]byte, error) {
b, _, err := transform.Bytes(d, b)
if err != nil {
return nil, err
}
return b, nil
}
// String converts the given encoded string to UTF-8. It returns the converted
// string or "", err if any error occurred.
func (d *Decoder) String(s string) (string, error) {
s, _, err := transform.String(d, s)
if err != nil {
return "", err
}
return s, nil
}
// Reader wraps another Reader to decode its bytes.
//
// The Decoder may not be used for any other operation as long as the returned
// Reader is in use.
func (d *Decoder) Reader(r io.Reader) io.Reader {
return transform.NewReader(r, d)
}
// An Encoder converts bytes from UTF-8. It implements transform.Transformer.
//
// Each rune that cannot be transcoded will result in an error. In this case,
// the transform will consume all source bytes up to, but not including, the offending
// rune. Transforming source bytes that are not valid UTF-8 will be replaced by
// `\uFFFD`. To return early with an error instead, use transform.Chain to
// preprocess the data with a UTF8Validator.
type Encoder struct {
transform.Transformer
// This forces external creators of Encoders to use names in struct
// initializers, allowing for future extendibility without having to break
// code.
_ struct{}
}
// Bytes converts bytes from UTF-8. It returns the converted bytes or nil, err if
// any error occurred.
func (e *Encoder) Bytes(b []byte) ([]byte, error) {
b, _, err := transform.Bytes(e, b)
if err != nil {
return nil, err
}
return b, nil
}
// String converts a string from UTF-8. It returns the converted string or
// "", err if any error occurred.
func (e *Encoder) String(s string) (string, error) {
s, _, err := transform.String(e, s)
if err != nil {
return "", err
}
return s, nil
}
// Writer wraps another Writer to encode its UTF-8 output.
//
// The Encoder may not be used for any other operation as long as the returned
// Writer is in use.
func (e *Encoder) Writer(w io.Writer) io.Writer {
return transform.NewWriter(w, e)
}
// ASCIISub is the ASCII substitute character, as recommended by
// https://unicode.org/reports/tr36/#Text_Comparison
const ASCIISub = '\x1a'
// Nop is the nop encoding. Its transformed bytes are the same as the source
// bytes; it does not replace invalid UTF-8 sequences.
var Nop Encoding = nop{}
type nop struct{}
func (nop) NewDecoder() *Decoder {
return &Decoder{Transformer: transform.Nop}
}
func (nop) NewEncoder() *Encoder {
return &Encoder{Transformer: transform.Nop}
}
// Replacement is the replacement encoding. Decoding from the replacement
// encoding yields a single '\uFFFD' replacement rune. Encoding from UTF-8 to
// the replacement encoding yields the same as the source bytes except that
// invalid UTF-8 is converted to '\uFFFD'.
//
// It is defined at http://encoding.spec.whatwg.org/#replacement
var Replacement Encoding = replacement{}
type replacement struct{}
func (replacement) NewDecoder() *Decoder {
return &Decoder{Transformer: replacementDecoder{}}
}
func (replacement) NewEncoder() *Encoder {
return &Encoder{Transformer: replacementEncoder{}}
}
func (replacement) ID() (mib identifier.MIB, other string) {
return identifier.Replacement, ""
}
type replacementDecoder struct{ transform.NopResetter }
func (replacementDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
if len(dst) < 3 {
return 0, 0, transform.ErrShortDst
}
if atEOF {
const fffd = "\ufffd"
dst[0] = fffd[0]
dst[1] = fffd[1]
dst[2] = fffd[2]
nDst = 3
}
return nDst, len(src), nil
}
type replacementEncoder struct{ transform.NopResetter }
func (replacementEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
r, size := rune(0), 0
for ; nSrc < len(src); nSrc += size {
r = rune(src[nSrc])
// Decode a 1-byte rune.
if r < utf8.RuneSelf {
size = 1
} else {
// Decode a multi-byte rune.
r, size = utf8.DecodeRune(src[nSrc:])
if size == 1 {
// All valid runes of size 1 (those below utf8.RuneSelf) were
// handled above. We have invalid UTF-8 or we haven't seen the
// full character yet.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
r = '\ufffd'
}
}
if nDst+utf8.RuneLen(r) > len(dst) {
err = transform.ErrShortDst
break
}
nDst += utf8.EncodeRune(dst[nDst:], r)
}
return nDst, nSrc, err
}
// HTMLEscapeUnsupported wraps encoders to replace source runes outside the
// repertoire of the destination encoding with HTML escape sequences.
//
// This wrapper exists to comply with URL and HTML forms requiring a
// non-terminating legacy encoder. The produced sequences may lead to data
// loss as they are indistinguishable from legitimate input. To avoid this
// issue, use UTF-8 encodings whenever possible.
func HTMLEscapeUnsupported(e *Encoder) *Encoder {
return &Encoder{Transformer: &errorHandler{e, errorToHTML}}
}
// ReplaceUnsupported wraps encoders to replace source runes outside the
// repertoire of the destination encoding with an encoding-specific
// replacement.
//
// This wrapper is only provided for backwards compatibility and legacy
// handling. Its use is strongly discouraged. Use UTF-8 whenever possible.
func ReplaceUnsupported(e *Encoder) *Encoder {
return &Encoder{Transformer: &errorHandler{e, errorToReplacement}}
}
type errorHandler struct {
*Encoder
handler func(dst []byte, r rune, err repertoireError) (n int, ok bool)
}
// TODO: consider making this error public in some form.
type repertoireError interface {
Replacement() byte
}
func (h errorHandler) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
nDst, nSrc, err = h.Transformer.Transform(dst, src, atEOF)
for err != nil {
rerr, ok := err.(repertoireError)
if !ok {
return nDst, nSrc, err
}
r, sz := utf8.DecodeRune(src[nSrc:])
n, ok := h.handler(dst[nDst:], r, rerr)
if !ok {
return nDst, nSrc, transform.ErrShortDst
}
err = nil
nDst += n
if nSrc += sz; nSrc < len(src) {
var dn, sn int
dn, sn, err = h.Transformer.Transform(dst[nDst:], src[nSrc:], atEOF)
nDst += dn
nSrc += sn
}
}
return nDst, nSrc, err
}
func errorToHTML(dst []byte, r rune, err repertoireError) (n int, ok bool) {
buf := [8]byte{}
b := strconv.AppendUint(buf[:0], uint64(r), 10)
if n = len(b) + len("&#;"); n >= len(dst) {
return 0, false
}
dst[0] = '&'
dst[1] = '#'
dst[copy(dst[2:], b)+2] = ';'
return n, true
}
func errorToReplacement(dst []byte, r rune, err repertoireError) (n int, ok bool) {
if len(dst) == 0 {
return 0, false
}
dst[0] = err.Replacement()
return 1, true
}
// ErrInvalidUTF8 means that a transformer encountered invalid UTF-8.
var ErrInvalidUTF8 = errors.New("encoding: invalid UTF-8")
// UTF8Validator is a transformer that returns ErrInvalidUTF8 on the first
// input byte that is not valid UTF-8.
var UTF8Validator transform.Transformer = utf8Validator{}
type utf8Validator struct{ transform.NopResetter }
func (utf8Validator) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
n := len(src)
if n > len(dst) {
n = len(dst)
}
for i := 0; i < n; {
if c := src[i]; c < utf8.RuneSelf {
dst[i] = c
i++
continue
}
_, size := utf8.DecodeRune(src[i:])
if size == 1 {
// All valid runes of size 1 (those below utf8.RuneSelf) were
// handled above. We have invalid UTF-8 or we haven't seen the
// full character yet.
err = ErrInvalidUTF8
if !atEOF && !utf8.FullRune(src[i:]) {
err = transform.ErrShortSrc
}
return i, i, err
}
if i+size > len(dst) {
return i, i, transform.ErrShortDst
}
for ; size > 0; size-- {
dst[i] = src[i]
i++
}
}
if len(src) > len(dst) {
err = transform.ErrShortDst
}
return n, n, err
}


@@ -0,0 +1,81 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run gen.go
// Package identifier defines the contract between implementations of Encoding
// and Index by defining identifiers that uniquely identify standardized coded
// character sets (CCS) and character encoding schemes (CES), which we will
// together refer to as encodings, for which Encoding implementations provide
// converters to and from UTF-8. This package is typically only of concern to
// implementers of Indexes and Encodings.
//
// One part of the identifier is the MIB code, which is defined by IANA and
// uniquely identifies a CCS or CES. Each code is associated with data that
// references authorities, official documentation as well as aliases and MIME
// names.
//
// Not all CESs are covered by the IANA registry. The "other" string that is
// returned by ID can be used to identify other character sets or versions of
// existing ones.
//
// It is recommended that each package that provides a set of Encodings provide
// the All and Common variables to reference all supported encodings and
// commonly used subset. This allows Index implementations to include all
// available encodings without explicitly referencing or knowing about them.
package identifier
// Note: this package is internal, but could be made public if there is a need
// for writing third-party Indexes and Encodings.
// References:
// - http://source.icu-project.org/repos/icu/icu/trunk/source/data/mappings/convrtrs.txt
// - http://www.iana.org/assignments/character-sets/character-sets.xhtml
// - http://www.iana.org/assignments/ianacharset-mib/ianacharset-mib
// - http://www.ietf.org/rfc/rfc2978.txt
// - https://www.unicode.org/reports/tr22/
// - http://www.w3.org/TR/encoding/
// - https://encoding.spec.whatwg.org/
// - https://encoding.spec.whatwg.org/encodings.json
// - https://tools.ietf.org/html/rfc6657#section-5
// Interface can be implemented by Encodings to define the CCS or CES for which
// it implements conversions.
type Interface interface {
// ID returns an encoding identifier. Exactly one of the mib and other
// values should be non-zero.
//
// In the usual case it is only necessary to indicate the MIB code. The
// other string can be used to specify encodings for which there is no MIB,
// such as "x-mac-dingbat".
//
// The other string may only contain the characters a-z, A-Z, 0-9, - and _.
ID() (mib MIB, other string)
// NOTE: the restrictions on the encoding are to allow extending the syntax
// with additional information such as versions, vendors and other variants.
}
// A MIB identifies an encoding. It is derived from the IANA MIB codes and adds
// some identifiers for some encodings that are not covered by the IANA
// standard.
//
// See http://www.iana.org/assignments/ianacharset-mib.
type MIB uint16
// These additional MIB types are not defined in IANA. They are added because
// they are common and defined within the text repo.
const (
// Unofficial marks the start of encodings not registered by IANA.
Unofficial MIB = 10000 + iota
// Replacement is the WhatWG replacement encoding.
Replacement
// XUserDefined is the code for x-user-defined.
XUserDefined
// MacintoshCyrillic is the code for x-mac-cyrillic.
MacintoshCyrillic
)

File diff suppressed because it is too large

75
vendor/golang.org/x/text/encoding/internal/internal.go generated vendored Normal file

@@ -0,0 +1,75 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package internal contains code that is shared among encoding implementations.
package internal
import (
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/internal/identifier"
"golang.org/x/text/transform"
)
// Encoding is an implementation of the Encoding interface that adds the String
// and ID methods to an existing encoding.
type Encoding struct {
encoding.Encoding
Name string
MIB identifier.MIB
}
// _ verifies that Encoding implements identifier.Interface.
var _ identifier.Interface = (*Encoding)(nil)
func (e *Encoding) String() string {
return e.Name
}
func (e *Encoding) ID() (mib identifier.MIB, other string) {
return e.MIB, ""
}
// SimpleEncoding is an Encoding that combines two Transformers.
type SimpleEncoding struct {
Decoder transform.Transformer
Encoder transform.Transformer
}
func (e *SimpleEncoding) NewDecoder() *encoding.Decoder {
return &encoding.Decoder{Transformer: e.Decoder}
}
func (e *SimpleEncoding) NewEncoder() *encoding.Encoder {
return &encoding.Encoder{Transformer: e.Encoder}
}
// FuncEncoding is an Encoding that combines two functions returning a new
// Transformer.
type FuncEncoding struct {
Decoder func() transform.Transformer
Encoder func() transform.Transformer
}
func (e FuncEncoding) NewDecoder() *encoding.Decoder {
return &encoding.Decoder{Transformer: e.Decoder()}
}
func (e FuncEncoding) NewEncoder() *encoding.Encoder {
return &encoding.Encoder{Transformer: e.Encoder()}
}
// A RepertoireError indicates a rune is not in the repertoire of a destination
// encoding. It is associated with an encoding-specific suggested replacement
// byte.
type RepertoireError byte
// Error implements the error interface.
func (r RepertoireError) Error() string {
return "encoding: rune not supported by encoding."
}
// Replacement returns the replacement string associated with this error.
func (r RepertoireError) Replacement() byte { return byte(r) }
var ErrASCIIReplacement = RepertoireError(encoding.ASCIISub)

82
vendor/golang.org/x/text/encoding/unicode/override.go generated vendored Normal file

@@ -0,0 +1,82 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package unicode
import (
"golang.org/x/text/transform"
)
// BOMOverride returns a new decoder transformer that is identical to fallback,
// except that the presence of a Byte Order Mark at the start of the input
// causes it to switch to the corresponding Unicode decoding. It will only
// consider BOMs for UTF-8, UTF-16BE, and UTF-16LE.
//
// This differs from using ExpectBOM by allowing a BOM to switch to UTF-8, not
// just UTF-16 variants, and allowing falling back to any encoding scheme.
//
// This technique is recommended by the W3C for use in HTML 5: "For
// compatibility with deployed content, the byte order mark (also known as BOM)
// is considered more authoritative than anything else."
// http://www.w3.org/TR/encoding/#specification-hooks
//
// Using BOMOverride is mostly intended for use cases where the first characters
// of a fallback encoding are known to not be a BOM, for example, for valid HTML
// and most encodings.
func BOMOverride(fallback transform.Transformer) transform.Transformer {
// TODO: possibly allow a variadic argument of unicode encodings to allow
// specifying details of which fallbacks are supported as well as
// specifying the details of the implementations. This would also allow for
// support for UTF-32, which should not be supported by default.
return &bomOverride{fallback: fallback}
}
type bomOverride struct {
fallback transform.Transformer
current transform.Transformer
}
func (d *bomOverride) Reset() {
d.current = nil
d.fallback.Reset()
}
var (
// TODO: we could use decode functions here, instead of allocating a new
// decoder on every NewDecoder as IgnoreBOM decoders can be stateless.
utf16le = UTF16(LittleEndian, IgnoreBOM)
utf16be = UTF16(BigEndian, IgnoreBOM)
)
const utf8BOM = "\ufeff"
func (d *bomOverride) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
if d.current != nil {
return d.current.Transform(dst, src, atEOF)
}
if len(src) < 3 && !atEOF {
return 0, 0, transform.ErrShortSrc
}
d.current = d.fallback
bomSize := 0
if len(src) >= 2 {
if src[0] == 0xFF && src[1] == 0xFE {
d.current = utf16le.NewDecoder()
bomSize = 2
} else if src[0] == 0xFE && src[1] == 0xFF {
d.current = utf16be.NewDecoder()
bomSize = 2
} else if len(src) >= 3 &&
src[0] == utf8BOM[0] &&
src[1] == utf8BOM[1] &&
src[2] == utf8BOM[2] {
d.current = transform.Nop
bomSize = 3
}
}
if bomSize < len(src) {
nDst, nSrc, err = d.current.Transform(dst, src[bomSize:], atEOF)
}
return nDst, nSrc + bomSize, err
}

434
vendor/golang.org/x/text/encoding/unicode/unicode.go generated vendored Normal file

@@ -0,0 +1,434 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package unicode provides Unicode encodings such as UTF-16.
package unicode // import "golang.org/x/text/encoding/unicode"
import (
"errors"
"unicode/utf16"
"unicode/utf8"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/internal"
"golang.org/x/text/encoding/internal/identifier"
"golang.org/x/text/internal/utf8internal"
"golang.org/x/text/runes"
"golang.org/x/text/transform"
)
// TODO: I think the Transformers really should return errors on unmatched
// surrogate pairs and odd numbers of bytes. This is not required by RFC 2781,
// which leaves it open, but is suggested by WhatWG. It will allow for all error
// modes as defined by WhatWG: fatal, HTML and Replacement. This would require
// the introduction of some kind of error type for conveying the erroneous code
// point.
// UTF8 is the UTF-8 encoding.
var UTF8 encoding.Encoding = utf8enc
var utf8enc = &internal.Encoding{
&internal.SimpleEncoding{utf8Decoder{}, runes.ReplaceIllFormed()},
"UTF-8",
identifier.UTF8,
}
type utf8Decoder struct{ transform.NopResetter }
func (utf8Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
var pSrc int // point from which to start copy in src
var accept utf8internal.AcceptRange
// The decoder can only make the input larger, not smaller.
n := len(src)
if len(dst) < n {
err = transform.ErrShortDst
n = len(dst)
atEOF = false
}
for nSrc < n {
c := src[nSrc]
if c < utf8.RuneSelf {
nSrc++
continue
}
first := utf8internal.First[c]
size := int(first & utf8internal.SizeMask)
if first == utf8internal.FirstInvalid {
goto handleInvalid // invalid starter byte
}
accept = utf8internal.AcceptRanges[first>>utf8internal.AcceptShift]
if nSrc+size > n {
if !atEOF {
// We may stop earlier than necessary here if the short sequence
// has invalid bytes. Not checking for this simplifies the code
// and may avoid duplicate computations in certain conditions.
if err == nil {
err = transform.ErrShortSrc
}
break
}
// Determine the maximal subpart of an ill-formed subsequence.
switch {
case nSrc+1 >= n || src[nSrc+1] < accept.Lo || accept.Hi < src[nSrc+1]:
size = 1
case nSrc+2 >= n || src[nSrc+2] < utf8internal.LoCB || utf8internal.HiCB < src[nSrc+2]:
size = 2
default:
size = 3 // As we are short, the maximum is 3.
}
goto handleInvalid
}
if c = src[nSrc+1]; c < accept.Lo || accept.Hi < c {
size = 1
goto handleInvalid // invalid continuation byte
} else if size == 2 {
} else if c = src[nSrc+2]; c < utf8internal.LoCB || utf8internal.HiCB < c {
size = 2
goto handleInvalid // invalid continuation byte
} else if size == 3 {
} else if c = src[nSrc+3]; c < utf8internal.LoCB || utf8internal.HiCB < c {
size = 3
goto handleInvalid // invalid continuation byte
}
nSrc += size
continue
handleInvalid:
// Copy the scanned input so far.
nDst += copy(dst[nDst:], src[pSrc:nSrc])
// Append RuneError to the destination.
const runeError = "\ufffd"
if nDst+len(runeError) > len(dst) {
return nDst, nSrc, transform.ErrShortDst
}
nDst += copy(dst[nDst:], runeError)
// Skip the maximal subpart of an ill-formed subsequence according to
// the W3C standard way instead of the Go way. This Transform is
// probably the only place in the text repo where it is warranted.
nSrc += size
pSrc = nSrc
// Recompute the maximum source length.
if sz := len(dst) - nDst; sz < len(src)-nSrc {
err = transform.ErrShortDst
n = nSrc + sz
atEOF = false
}
}
return nDst + copy(dst[nDst:], src[pSrc:nSrc]), nSrc, err
}
// UTF16 returns a UTF-16 Encoding for the given default endianness and byte
// order mark (BOM) policy.
//
// When decoding from UTF-16 to UTF-8, if the BOMPolicy is IgnoreBOM then
// neither BOMs U+FEFF nor noncharacters U+FFFE in the input stream will affect
// the endianness used for decoding, and will instead be output as their
// standard UTF-8 encodings: "\xef\xbb\xbf" and "\xef\xbf\xbe". If the BOMPolicy
// is UseBOM or ExpectBOM, a starting BOM is not written to the UTF-8 output.
// Instead, it overrides the default endianness e for the remainder of the
// transformation. Any subsequent BOMs U+FEFF or noncharacters U+FFFE will not
// affect the endianness used, and will instead be output as their standard
// UTF-8 encodings. For UseBOM, if there is no starting BOM, it will proceed
// with the default Endianness. For ExpectBOM, in that case, the transformation
// will return early with an ErrMissingBOM error.
//
// When encoding from UTF-8 to UTF-16, a BOM will be inserted at the start of
// the output if the BOMPolicy is UseBOM or ExpectBOM. Otherwise, a BOM will not
// be inserted. The UTF-8 input does not need to contain a BOM.
//
// There is no concept of a 'native' endianness. If the UTF-16 data is produced
// and consumed in a greater context that implies a certain endianness, use
// IgnoreBOM. Otherwise, use ExpectBOM and always produce and consume a BOM.
//
// In the language of https://www.unicode.org/faq/utf_bom.html#bom10, IgnoreBOM
// corresponds to "Where the precise type of the data stream is known... the
// BOM should not be used" and ExpectBOM corresponds to "A particular
// protocol... may require use of the BOM".
func UTF16(e Endianness, b BOMPolicy) encoding.Encoding {
return utf16Encoding{config{e, b}, mibValue[e][b&bomMask]}
}
// mibValue maps Endianness and BOMPolicy settings to MIB constants. Note that
// some configurations map to the same MIB identifier. RFC 2781 has requirements
// and recommendations. Some of the "configurations" are merely recommendations,
// so multiple configurations could match.
var mibValue = map[Endianness][numBOMValues]identifier.MIB{
BigEndian: [numBOMValues]identifier.MIB{
IgnoreBOM: identifier.UTF16BE,
UseBOM: identifier.UTF16, // BigEndian default is preferred by RFC 2781.
// TODO: acceptBOM | strictBOM would map to UTF16BE as well.
},
LittleEndian: [numBOMValues]identifier.MIB{
IgnoreBOM: identifier.UTF16LE,
UseBOM: identifier.UTF16, // LittleEndian default is allowed and preferred on Windows.
// TODO: acceptBOM | strictBOM would map to UTF16LE as well.
},
// ExpectBOM is not widely used and has no valid MIB identifier.
}
// All lists a configuration for each IANA-defined UTF-16 variant.
var All = []encoding.Encoding{
UTF8,
UTF16(BigEndian, UseBOM),
UTF16(BigEndian, IgnoreBOM),
UTF16(LittleEndian, IgnoreBOM),
}
// BOMPolicy is a UTF-16 encoding's byte order mark policy.
type BOMPolicy uint8
const (
writeBOM BOMPolicy = 0x01
acceptBOM BOMPolicy = 0x02
requireBOM BOMPolicy = 0x04
bomMask BOMPolicy = 0x07
// HACK: numBOMValues == 8 triggers a bug in the 1.4 compiler (cannot have a
// map of an array of length 8 of a type that is also used as a key or value
// in another map). See golang.org/issue/11354.
// TODO: consider changing this value back to 8 if the use of 1.4.* has
// been minimized.
numBOMValues = 8 + 1
// IgnoreBOM means to ignore any byte order marks.
IgnoreBOM BOMPolicy = 0
// Common and RFC 2781-compliant interpretation for UTF-16BE/LE.
// UseBOM means that the UTF-16 form may start with a byte order mark, which
// will be used to override the default encoding.
UseBOM BOMPolicy = writeBOM | acceptBOM
// Common and RFC 2781-compliant interpretation for UTF-16.
// ExpectBOM means that the UTF-16 form must start with a byte order mark,
// which will be used to override the default encoding.
ExpectBOM BOMPolicy = writeBOM | acceptBOM | requireBOM
// Used in Java as Unicode (not to be confused with Java's UTF-16) and
// ICU's UTF-16,version=1. Not compliant with RFC 2781.
// TODO (maybe): strictBOM: BOM must match Endianness. This would allow:
// - UTF-16(B|L)E,version=1: writeBOM | acceptBOM | requireBOM | strictBOM
// (UnicodeBig and UnicodeLittle in Java)
// - RFC 2781-compliant, but less common interpretation for UTF-16(B|L)E:
// acceptBOM | strictBOM (e.g. assigned to CheckBOM).
// This addition would be consistent with supporting ExpectBOM.
)
// Endianness is a UTF-16 encoding's default endianness.
type Endianness bool
const (
// BigEndian is UTF-16BE.
BigEndian Endianness = false
// LittleEndian is UTF-16LE.
LittleEndian Endianness = true
)
// ErrMissingBOM means that decoding UTF-16 input with ExpectBOM did not find a
// starting byte order mark.
var ErrMissingBOM = errors.New("encoding: missing byte order mark")
type utf16Encoding struct {
config
mib identifier.MIB
}
type config struct {
endianness Endianness
bomPolicy BOMPolicy
}
func (u utf16Encoding) NewDecoder() *encoding.Decoder {
return &encoding.Decoder{Transformer: &utf16Decoder{
initial: u.config,
current: u.config,
}}
}
func (u utf16Encoding) NewEncoder() *encoding.Encoder {
return &encoding.Encoder{Transformer: &utf16Encoder{
endianness: u.endianness,
initialBOMPolicy: u.bomPolicy,
currentBOMPolicy: u.bomPolicy,
}}
}
func (u utf16Encoding) ID() (mib identifier.MIB, other string) {
return u.mib, ""
}
func (u utf16Encoding) String() string {
e, b := "B", ""
if u.endianness == LittleEndian {
e = "L"
}
switch u.bomPolicy {
case ExpectBOM:
b = "Expect"
case UseBOM:
b = "Use"
case IgnoreBOM:
b = "Ignore"
}
return "UTF-16" + e + "E (" + b + " BOM)"
}
type utf16Decoder struct {
initial config
current config
}
func (u *utf16Decoder) Reset() {
u.current = u.initial
}
func (u *utf16Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
if len(src) == 0 {
if atEOF && u.current.bomPolicy&requireBOM != 0 {
return 0, 0, ErrMissingBOM
}
return 0, 0, nil
}
if u.current.bomPolicy&acceptBOM != 0 {
if len(src) < 2 {
return 0, 0, transform.ErrShortSrc
}
switch {
case src[0] == 0xfe && src[1] == 0xff:
u.current.endianness = BigEndian
nSrc = 2
case src[0] == 0xff && src[1] == 0xfe:
u.current.endianness = LittleEndian
nSrc = 2
default:
if u.current.bomPolicy&requireBOM != 0 {
return 0, 0, ErrMissingBOM
}
}
u.current.bomPolicy = IgnoreBOM
}
var r rune
var dSize, sSize int
for nSrc < len(src) {
if nSrc+1 < len(src) {
x := uint16(src[nSrc+0])<<8 | uint16(src[nSrc+1])
if u.current.endianness == LittleEndian {
x = x>>8 | x<<8
}
r, sSize = rune(x), 2
if utf16.IsSurrogate(r) {
if nSrc+3 < len(src) {
x = uint16(src[nSrc+2])<<8 | uint16(src[nSrc+3])
if u.current.endianness == LittleEndian {
x = x>>8 | x<<8
}
// Save for next iteration if it is not a high surrogate.
if isHighSurrogate(rune(x)) {
r, sSize = utf16.DecodeRune(r, rune(x)), 4
}
} else if !atEOF {
err = transform.ErrShortSrc
break
}
}
if dSize = utf8.RuneLen(r); dSize < 0 {
r, dSize = utf8.RuneError, 3
}
} else if atEOF {
// Single trailing byte.
r, dSize, sSize = utf8.RuneError, 3, 1
} else {
err = transform.ErrShortSrc
break
}
if nDst+dSize > len(dst) {
err = transform.ErrShortDst
break
}
nDst += utf8.EncodeRune(dst[nDst:], r)
nSrc += sSize
}
return nDst, nSrc, err
}
func isHighSurrogate(r rune) bool {
return 0xDC00 <= r && r <= 0xDFFF
}
type utf16Encoder struct {
endianness Endianness
initialBOMPolicy BOMPolicy
currentBOMPolicy BOMPolicy
}
func (u *utf16Encoder) Reset() {
u.currentBOMPolicy = u.initialBOMPolicy
}
func (u *utf16Encoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
if u.currentBOMPolicy&writeBOM != 0 {
if len(dst) < 2 {
return 0, 0, transform.ErrShortDst
}
dst[0], dst[1] = 0xfe, 0xff
u.currentBOMPolicy = IgnoreBOM
nDst = 2
}
r, size := rune(0), 0
for nSrc < len(src) {
r = rune(src[nSrc])
// Decode a 1-byte rune.
if r < utf8.RuneSelf {
size = 1
} else {
// Decode a multi-byte rune.
r, size = utf8.DecodeRune(src[nSrc:])
if size == 1 {
// All valid runes of size 1 (those below utf8.RuneSelf) were
// handled above. We have invalid UTF-8 or we haven't seen the
// full character yet.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
}
}
if r <= 0xffff {
if nDst+2 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = uint8(r >> 8)
dst[nDst+1] = uint8(r)
nDst += 2
} else {
if nDst+4 > len(dst) {
err = transform.ErrShortDst
break
}
r1, r2 := utf16.EncodeRune(r)
dst[nDst+0] = uint8(r1 >> 8)
dst[nDst+1] = uint8(r1)
dst[nDst+2] = uint8(r2 >> 8)
dst[nDst+3] = uint8(r2)
nDst += 4
}
nSrc += size
}
if u.endianness == LittleEndian {
for i := 0; i < nDst; i += 2 {
dst[i], dst[i+1] = dst[i+1], dst[i]
}
}
return nDst, nSrc, err
}


@@ -0,0 +1,87 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package utf8internal contains low-level utf8-related constants, tables, etc.
// that are used internally by the text package.
package utf8internal
// The default lowest and highest continuation byte.
const (
LoCB = 0x80 // 1000 0000
HiCB = 0xBF // 1011 1111
)
// Constants related to getting information of first bytes of UTF-8 sequences.
const (
// ASCII identifies a UTF-8 byte as ASCII.
ASCII = as
// FirstInvalid indicates a byte is invalid as a first byte of a UTF-8
// sequence.
FirstInvalid = xx
// SizeMask is a mask for the size bits. Use x&SizeMask to get the size.
SizeMask = 7
// AcceptShift is the right-shift count for the first byte info byte to get
// the index into the AcceptRanges table. See AcceptRanges.
AcceptShift = 4
// The names of these constants are chosen to give nice alignment in the
// table below. The first nibble is an index into acceptRanges or F for
// special one-byte cases. The second nibble is the Rune length or the
// Status for the special one-byte case.
xx = 0xF1 // invalid: size 1
as = 0xF0 // ASCII: size 1
s1 = 0x02 // accept 0, size 2
s2 = 0x13 // accept 1, size 3
s3 = 0x03 // accept 0, size 3
s4 = 0x23 // accept 2, size 3
s5 = 0x34 // accept 3, size 4
s6 = 0x04 // accept 0, size 4
s7 = 0x44 // accept 4, size 4
)
// First is information about the first byte in a UTF-8 sequence.
var First = [256]uint8{
// 1 2 3 4 5 6 7 8 9 A B C D E F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F
// 1 2 3 4 5 6 7 8 9 A B C D E F
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF
xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF
s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF
s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF
s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF
}
// AcceptRange gives the range of valid values for the second byte in a UTF-8
// sequence for any value for First that is not ASCII or FirstInvalid.
type AcceptRange struct {
Lo uint8 // lowest value for second byte.
Hi uint8 // highest value for second byte.
}
// AcceptRanges is a slice of AcceptRange values. For a given byte sequence b
//
// AcceptRanges[First[b[0]]>>AcceptShift]
//
// will give the value of AcceptRange for the multi-byte UTF-8 sequence starting
// at b[0].
var AcceptRanges = [...]AcceptRange{
0: {LoCB, HiCB},
1: {0xA0, HiCB},
2: {LoCB, 0x9F},
3: {0x90, HiCB},
4: {LoCB, 0x8F},
}

187
vendor/golang.org/x/text/runes/cond.go generated vendored Normal file

@@ -0,0 +1,187 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runes
import (
"unicode/utf8"
"golang.org/x/text/transform"
)
// Note: below we pass invalid UTF-8 to the tIn and tNotIn transformers as is.
// This is done for various reasons:
// - To retain the semantics of the Nop transformer: if input is passed to a Nop
// one would expect it to be unchanged.
// - It would be very expensive to pass a converted RuneError to a transformer:
// a transformer might need more source bytes after RuneError, meaning that
// the only way to pass it safely is to create a new buffer and manage the
// intermingling of RuneErrors and normal input.
// - Many transformers leave ill-formed UTF-8 as is, so this is not
// inconsistent. Generally ill-formed UTF-8 is only replaced if it is a
// logical consequence of the operation (as for Map) or if it otherwise would
// pose security concerns (as for Remove).
// - An alternative would be to return an error on ill-formed UTF-8, but this
// would be inconsistent with other operations.
// If returns a transformer that applies tIn to consecutive runes for which
// s.Contains(r) and tNotIn to consecutive runes for which !s.Contains(r). Reset
// is called on tIn and tNotIn at the start of each run. A Nop transformer will
// substitute a nil value passed to tIn or tNotIn. Invalid UTF-8 is translated
// to RuneError to determine which transformer to apply, but is passed as is to
// the respective transformer.
func If(s Set, tIn, tNotIn transform.Transformer) Transformer {
if tIn == nil && tNotIn == nil {
return Transformer{transform.Nop}
}
if tIn == nil {
tIn = transform.Nop
}
if tNotIn == nil {
tNotIn = transform.Nop
}
sIn, ok := tIn.(transform.SpanningTransformer)
if !ok {
sIn = dummySpan{tIn}
}
sNotIn, ok := tNotIn.(transform.SpanningTransformer)
if !ok {
sNotIn = dummySpan{tNotIn}
}
a := &cond{
tIn: sIn,
tNotIn: sNotIn,
f: s.Contains,
}
a.Reset()
return Transformer{a}
}
type dummySpan struct{ transform.Transformer }
func (d dummySpan) Span(src []byte, atEOF bool) (n int, err error) {
return 0, transform.ErrEndOfSpan
}
type cond struct {
tIn, tNotIn transform.SpanningTransformer
f func(rune) bool
check func(rune) bool // current check to perform
t transform.SpanningTransformer // current transformer to use
}
// Reset implements transform.Transformer.
func (t *cond) Reset() {
t.check = t.is
t.t = t.tIn
t.t.Reset() // notIn will be reset on first usage.
}
func (t *cond) is(r rune) bool {
if t.f(r) {
return true
}
t.check = t.isNot
t.t = t.tNotIn
t.tNotIn.Reset()
return false
}
func (t *cond) isNot(r rune) bool {
if !t.f(r) {
return true
}
t.check = t.is
t.t = t.tIn
t.tIn.Reset()
return false
}
// This implementation of Span doesn't help all too much, but it needs to be
// there to satisfy this package's Transformer interface.
// TODO: there is certainly room for improvement, though. For example, if
// t.t == transform.Nop (which will be a common occurrence) it will save a bundle
// to special-case that loop.
func (t *cond) Span(src []byte, atEOF bool) (n int, err error) {
p := 0
for n < len(src) && err == nil {
// Don't process too much at a time as the Spanner that will be
// called on this block may terminate early.
const maxChunk = 4096
max := len(src)
if v := n + maxChunk; v < max {
max = v
}
atEnd := false
size := 0
current := t.t
for ; p < max; p += size {
r := rune(src[p])
if r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
if !atEOF && !utf8.FullRune(src[p:]) {
err = transform.ErrShortSrc
break
}
}
if !t.check(r) {
// The next rune will be the start of a new run.
atEnd = true
break
}
}
n2, err2 := current.Span(src[n:p], atEnd || (atEOF && p == len(src)))
n += n2
if err2 != nil {
return n, err2
}
// At this point either err != nil or t.check will pass for the rune at p.
p = n + size
}
return n, err
}
func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
p := 0
for nSrc < len(src) && err == nil {
// Don't process too much at a time, as the work might be wasted if the
// destination buffer isn't large enough to hold the result or a
// transform returns an error early.
const maxChunk = 4096
max := len(src)
if n := nSrc + maxChunk; n < len(src) {
max = n
}
atEnd := false
size := 0
current := t.t
for ; p < max; p += size {
r := rune(src[p])
if r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
if !atEOF && !utf8.FullRune(src[p:]) {
err = transform.ErrShortSrc
break
}
}
if !t.check(r) {
// The next rune will be the start of a new run.
atEnd = true
break
}
}
nDst2, nSrc2, err2 := current.Transform(dst[nDst:], src[nSrc:p], atEnd || (atEOF && p == len(src)))
nDst += nDst2
nSrc += nSrc2
if err2 != nil {
return nDst, nSrc, err2
}
// At this point either err != nil or t.check will pass for the rune at p.
p = nSrc + size
}
return nDst, nSrc, err
}
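
A minimal usage sketch of the If combinator defined above, routing runs of runes to either transformer based on a Set. The sample input and the choice of unicode.Latin are illustrative assumptions, not part of the vendored source; the helpers used (runes.In, runes.Map, transform.String) are all defined in the files listed in this commit.

```go
package main

import (
	"fmt"
	"unicode"

	"golang.org/x/text/runes"
	"golang.org/x/text/transform"
)

func main() {
	// Upper-case only runes in the Latin script; everything else falls
	// through to the Nop transformer (nil tNotIn).
	t := runes.If(runes.In(unicode.Latin), runes.Map(unicode.ToUpper), nil)

	out, _, err := transform.String(t, "abc δεφ 123")
	if err != nil {
		return // handle the error
	}
	fmt.Println(out) // ABC δεφ 123
}
```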

355
vendor/golang.org/x/text/runes/runes.go generated vendored Normal file
View File

@ -0,0 +1,355 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package runes provides transforms for UTF-8 encoded text.
package runes // import "golang.org/x/text/runes"
import (
"unicode"
"unicode/utf8"
"golang.org/x/text/transform"
)
// A Set is a collection of runes.
type Set interface {
// Contains returns true if r is contained in the set.
Contains(r rune) bool
}
type setFunc func(rune) bool
func (s setFunc) Contains(r rune) bool {
return s(r)
}
// Note: using funcs here instead of wrapping types results in cleaner
// documentation and a smaller API.
// In creates a Set with a Contains method that returns true for all runes in
// the given RangeTable.
func In(rt *unicode.RangeTable) Set {
return setFunc(func(r rune) bool { return unicode.Is(rt, r) })
}
// NotIn creates a Set with a Contains method that returns true for all runes not
// in the given RangeTable.
func NotIn(rt *unicode.RangeTable) Set {
return setFunc(func(r rune) bool { return !unicode.Is(rt, r) })
}
// Predicate creates a Set with a Contains method that returns f(r).
func Predicate(f func(rune) bool) Set {
return setFunc(f)
}
// Transformer implements the transform.Transformer interface.
type Transformer struct {
t transform.SpanningTransformer
}
func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
return t.t.Transform(dst, src, atEOF)
}
func (t Transformer) Span(b []byte, atEOF bool) (n int, err error) {
return t.t.Span(b, atEOF)
}
func (t Transformer) Reset() { t.t.Reset() }
// Bytes returns a new byte slice with the result of converting b using t. It
// calls Reset on t. It returns nil if any error was found. This can only happen
// if an error-producing Transformer is passed to If.
func (t Transformer) Bytes(b []byte) []byte {
b, _, err := transform.Bytes(t, b)
if err != nil {
return nil
}
return b
}
// String returns a string with the result of converting s using t. It calls
// Reset on t. It returns the empty string if any error was found. This can only
// happen if an error-producing Transformer is passed to If.
func (t Transformer) String(s string) string {
s, _, err := transform.String(t, s)
if err != nil {
return ""
}
return s
}
// TODO:
// - Copy: copying strings and bytes in whole-rune units.
// - Validation (maybe)
// - Well-formed-ness (maybe)
const runeErrorString = string(utf8.RuneError)
// Remove returns a Transformer that removes runes r for which s.Contains(r).
// Illegal input bytes are replaced by RuneError before being passed to s.Contains.
func Remove(s Set) Transformer {
if f, ok := s.(setFunc); ok {
// This little trick cuts the running time of BenchmarkRemove for sets
// created by Predicate roughly in half.
// TODO: special-case RangeTables as well.
return Transformer{remove(f)}
}
return Transformer{remove(s.Contains)}
}
// TODO: remove transform.RemoveFunc.
type remove func(r rune) bool
func (remove) Reset() {}
// Span implements transform.Spanner.
func (t remove) Span(src []byte, atEOF bool) (n int, err error) {
for r, size := rune(0), 0; n < len(src); {
if r = rune(src[n]); r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
// Invalid rune.
if !atEOF && !utf8.FullRune(src[n:]) {
err = transform.ErrShortSrc
} else {
err = transform.ErrEndOfSpan
}
break
}
if t(r) {
err = transform.ErrEndOfSpan
break
}
n += size
}
return
}
// Transform implements transform.Transformer.
func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
for r, size := rune(0), 0; nSrc < len(src); {
if r = rune(src[nSrc]); r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
// Invalid rune.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
// We replace illegal bytes with RuneError. Not doing so might
// otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
// The resulting byte sequence may subsequently contain runes
// for which t(r) is true that were passed unnoticed.
if !t(utf8.RuneError) {
if nDst+3 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = runeErrorString[0]
dst[nDst+1] = runeErrorString[1]
dst[nDst+2] = runeErrorString[2]
nDst += 3
}
nSrc++
continue
}
if t(r) {
nSrc += size
continue
}
if nDst+size > len(dst) {
err = transform.ErrShortDst
break
}
for i := 0; i < size; i++ {
dst[nDst] = src[nSrc]
nDst++
nSrc++
}
}
return
}
// Map returns a Transformer that maps the runes in the input using the given
// mapping. Illegal bytes in the input are converted to utf8.RuneError before
// being passed to the mapping func.
func Map(mapping func(rune) rune) Transformer {
return Transformer{mapper(mapping)}
}
type mapper func(rune) rune
func (mapper) Reset() {}
// Span implements transform.Spanner.
func (t mapper) Span(src []byte, atEOF bool) (n int, err error) {
for r, size := rune(0), 0; n < len(src); n += size {
if r = rune(src[n]); r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
// Invalid rune.
if !atEOF && !utf8.FullRune(src[n:]) {
err = transform.ErrShortSrc
} else {
err = transform.ErrEndOfSpan
}
break
}
if t(r) != r {
err = transform.ErrEndOfSpan
break
}
}
return n, err
}
// Transform implements transform.Transformer.
func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
var replacement rune
var b [utf8.UTFMax]byte
for r, size := rune(0), 0; nSrc < len(src); {
if r = rune(src[nSrc]); r < utf8.RuneSelf {
if replacement = t(r); replacement < utf8.RuneSelf {
if nDst == len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst] = byte(replacement)
nDst++
nSrc++
continue
}
size = 1
} else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
// Invalid rune.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
if replacement = t(utf8.RuneError); replacement == utf8.RuneError {
if nDst+3 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = runeErrorString[0]
dst[nDst+1] = runeErrorString[1]
dst[nDst+2] = runeErrorString[2]
nDst += 3
nSrc++
continue
}
} else if replacement = t(r); replacement == r {
if nDst+size > len(dst) {
err = transform.ErrShortDst
break
}
for i := 0; i < size; i++ {
dst[nDst] = src[nSrc]
nDst++
nSrc++
}
continue
}
n := utf8.EncodeRune(b[:], replacement)
if nDst+n > len(dst) {
err = transform.ErrShortDst
break
}
for i := 0; i < n; i++ {
dst[nDst] = b[i]
nDst++
}
nSrc += size
}
return
}
// ReplaceIllFormed returns a transformer that replaces all input bytes that are
// not part of a well-formed UTF-8 code sequence with utf8.RuneError.
func ReplaceIllFormed() Transformer {
return Transformer{&replaceIllFormed{}}
}
type replaceIllFormed struct{ transform.NopResetter }
func (t replaceIllFormed) Span(src []byte, atEOF bool) (n int, err error) {
for n < len(src) {
// ASCII fast path.
if src[n] < utf8.RuneSelf {
n++
continue
}
r, size := utf8.DecodeRune(src[n:])
// Look for a valid non-ASCII rune.
if r != utf8.RuneError || size != 1 {
n += size
continue
}
// Look for short source data.
if !atEOF && !utf8.FullRune(src[n:]) {
err = transform.ErrShortSrc
break
}
// We have an invalid rune.
err = transform.ErrEndOfSpan
break
}
return n, err
}
func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
for nSrc < len(src) {
// ASCII fast path.
if r := src[nSrc]; r < utf8.RuneSelf {
if nDst == len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst] = r
nDst++
nSrc++
continue
}
// Look for a valid non-ASCII rune.
if _, size := utf8.DecodeRune(src[nSrc:]); size != 1 {
if size != copy(dst[nDst:], src[nSrc:nSrc+size]) {
err = transform.ErrShortDst
break
}
nDst += size
nSrc += size
continue
}
// Look for short source data.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
// We have an invalid rune.
if nDst+3 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = runeErrorString[0]
dst[nDst+1] = runeErrorString[1]
dst[nDst+2] = runeErrorString[2]
nDst += 3
nSrc++
}
return nDst, nSrc, err
}
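
For orientation, a short sketch of the Remove and ReplaceIllFormed transformers defined above, using the Transformer.String convenience method. The sample inputs are assumptions chosen for illustration.

```go
package main

import (
	"fmt"
	"unicode"

	"golang.org/x/text/runes"
)

func main() {
	// Remove drops every rune the Set matches; here, all Unicode whitespace.
	strip := runes.Remove(runes.Predicate(unicode.IsSpace))
	fmt.Println(strip.String("go ad auth")) // goadauth

	// ReplaceIllFormed replaces bytes that are not part of well-formed UTF-8
	// with utf8.RuneError (U+FFFD).
	fix := runes.ReplaceIllFormed()
	fmt.Println(fix.String("a\xffb")) // a<U+FFFD>b
}
```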

705
vendor/golang.org/x/text/transform/transform.go generated vendored Normal file
View File

@ -0,0 +1,705 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package transform provides reader and writer wrappers that transform the
// bytes passing through as well as various transformations. Example
// transformations provided by other packages include normalization and
// conversion between character sets.
package transform // import "golang.org/x/text/transform"
import (
"bytes"
"errors"
"io"
"unicode/utf8"
)
var (
// ErrShortDst means that the destination buffer was too short to
// receive all of the transformed bytes.
ErrShortDst = errors.New("transform: short destination buffer")
// ErrShortSrc means that the source buffer has insufficient data to
// complete the transformation.
ErrShortSrc = errors.New("transform: short source buffer")
// ErrEndOfSpan means that the input and output (the transformed input)
// are not identical.
ErrEndOfSpan = errors.New("transform: input and output are not identical")
// errInconsistentByteCount means that Transform returned success (nil
// error) but also returned nSrc inconsistent with the src argument.
errInconsistentByteCount = errors.New("transform: inconsistent byte count returned")
// errShortInternal means that an internal buffer is not large enough
// to make progress and the Transform operation must be aborted.
errShortInternal = errors.New("transform: short internal buffer")
)
// Transformer transforms bytes.
type Transformer interface {
// Transform writes to dst the transformed bytes read from src, and
// returns the number of dst bytes written and src bytes read. The
// atEOF argument tells whether src represents the last bytes of the
// input.
//
// Callers should always process the nDst bytes produced and account
// for the nSrc bytes consumed before considering the error err.
//
// A nil error means that all of the transformed bytes (whether freshly
// transformed from src or left over from previous Transform calls)
// were written to dst. A nil error can be returned regardless of
// whether atEOF is true. If err is nil then nSrc must equal len(src);
// the converse is not necessarily true.
//
// ErrShortDst means that dst was too short to receive all of the
// transformed bytes. ErrShortSrc means that src had insufficient data
// to complete the transformation. If both conditions apply, then
// either error may be returned. Other than the error conditions listed
// here, implementations are free to report other errors that arise.
Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error)
// Reset resets the state and allows a Transformer to be reused.
Reset()
}
// SpanningTransformer extends the Transformer interface with a Span method
// that determines how much of the input already conforms to the Transformer.
type SpanningTransformer interface {
Transformer
// Span returns a position in src such that transforming src[:n] results in
// identical output src[:n] for these bytes. It does not necessarily return
// the largest such n. The atEOF argument tells whether src represents the
// last bytes of the input.
//
// Callers should always account for the n bytes consumed before
// considering the error err.
//
// A nil error means that all input bytes are known to be identical to the
// output produced by the Transformer. A nil error can be returned
// regardless of whether atEOF is true. If err is nil, then n must
// equal len(src); the converse is not necessarily true.
//
// ErrEndOfSpan means that the Transformer output may differ from the
// input after n bytes. Note that n may be len(src), meaning that the output
// would contain additional bytes after otherwise identical output.
// ErrShortSrc means that src had insufficient data to determine whether the
// remaining bytes would change. Other than the error conditions listed
// here, implementations are free to report other errors that arise.
//
// Calling Span can modify the Transformer state as a side effect. In
// effect, it does the transformation just as calling Transform would, only
// without copying to a destination buffer and only up to a point it can
// determine the input and output bytes are the same. This is obviously more
// limited than calling Transform, but can be more efficient in terms of
// copying and allocating buffers. Calls to Span and Transform may be
// interleaved.
Span(src []byte, atEOF bool) (n int, err error)
}
// NopResetter can be embedded by implementations of Transformer to add a nop
// Reset method.
type NopResetter struct{}
// Reset implements the Reset method of the Transformer interface.
func (NopResetter) Reset() {}
// Reader wraps another io.Reader by transforming the bytes read.
type Reader struct {
r io.Reader
t Transformer
err error
// dst[dst0:dst1] contains bytes that have been transformed by t but
// not yet copied out via Read.
dst []byte
dst0, dst1 int
// src[src0:src1] contains bytes that have been read from r but not
// yet transformed through t.
src []byte
src0, src1 int
// transformComplete is whether the transformation is complete,
// regardless of whether or not it was successful.
transformComplete bool
}
const defaultBufSize = 4096
// NewReader returns a new Reader that wraps r by transforming the bytes read
// via t. It calls Reset on t.
func NewReader(r io.Reader, t Transformer) *Reader {
t.Reset()
return &Reader{
r: r,
t: t,
dst: make([]byte, defaultBufSize),
src: make([]byte, defaultBufSize),
}
}
// Read implements the io.Reader interface.
func (r *Reader) Read(p []byte) (int, error) {
n, err := 0, error(nil)
for {
// Copy out any transformed bytes and return the final error if we are done.
if r.dst0 != r.dst1 {
n = copy(p, r.dst[r.dst0:r.dst1])
r.dst0 += n
if r.dst0 == r.dst1 && r.transformComplete {
return n, r.err
}
return n, nil
} else if r.transformComplete {
return 0, r.err
}
// Try to transform some source bytes, or to flush the transformer if we
// are out of source bytes. We do this even if r.r.Read returned an error.
// As the io.Reader documentation says, "process the n > 0 bytes returned
// before considering the error".
if r.src0 != r.src1 || r.err != nil {
r.dst0 = 0
r.dst1, n, err = r.t.Transform(r.dst, r.src[r.src0:r.src1], r.err == io.EOF)
r.src0 += n
switch {
case err == nil:
if r.src0 != r.src1 {
r.err = errInconsistentByteCount
}
// The Transform call was successful; we are complete if we
// cannot read more bytes into src.
r.transformComplete = r.err != nil
continue
case err == ErrShortDst && (r.dst1 != 0 || n != 0):
// Make room in dst by copying out, and try again.
continue
case err == ErrShortSrc && r.src1-r.src0 != len(r.src) && r.err == nil:
// Read more bytes into src via the code below, and try again.
default:
r.transformComplete = true
// The reader error (r.err) takes precedence over the
// transformer error (err) unless r.err is nil or io.EOF.
if r.err == nil || r.err == io.EOF {
r.err = err
}
continue
}
}
// Move any untransformed source bytes to the start of the buffer
// and read more bytes.
if r.src0 != 0 {
r.src0, r.src1 = 0, copy(r.src, r.src[r.src0:r.src1])
}
n, r.err = r.r.Read(r.src[r.src1:])
r.src1 += n
}
}
// TODO: implement ReadByte (and ReadRune??).
// Writer wraps another io.Writer by transforming the bytes written to it.
// The user needs to call Close to flush unwritten bytes that may
// be buffered.
type Writer struct {
w io.Writer
t Transformer
dst []byte
// src[:n] contains bytes that have not yet passed through t.
src []byte
n int
}
// NewWriter returns a new Writer that wraps w by transforming the bytes written
// via t. It calls Reset on t.
func NewWriter(w io.Writer, t Transformer) *Writer {
t.Reset()
return &Writer{
w: w,
t: t,
dst: make([]byte, defaultBufSize),
src: make([]byte, defaultBufSize),
}
}
// Write implements the io.Writer interface. If there are not enough
// bytes available to complete a Transform, the bytes will be buffered
// for the next write. Call Close to convert the remaining bytes.
func (w *Writer) Write(data []byte) (n int, err error) {
src := data
if w.n > 0 {
// Append bytes from data to the last remainder.
// TODO: limit the amount copied on first try.
n = copy(w.src[w.n:], data)
w.n += n
src = w.src[:w.n]
}
for {
nDst, nSrc, err := w.t.Transform(w.dst, src, false)
if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
return n, werr
}
src = src[nSrc:]
if w.n == 0 {
n += nSrc
} else if len(src) <= n {
// Enough bytes from w.src have been consumed. We make src point
// to data instead to reduce the copying.
w.n = 0
n -= len(src)
src = data[n:]
if n < len(data) && (err == nil || err == ErrShortSrc) {
continue
}
}
switch err {
case ErrShortDst:
// This error is okay as long as we are making progress.
if nDst > 0 || nSrc > 0 {
continue
}
case ErrShortSrc:
if len(src) < len(w.src) {
m := copy(w.src, src)
// If w.n > 0, bytes from data were already copied to w.src and n
// was already set to the number of bytes consumed.
if w.n == 0 {
n += m
}
w.n = m
err = nil
} else if nDst > 0 || nSrc > 0 {
// Not enough buffer to store the remainder. Keep processing as
// long as there is progress. Without this case, transforms that
// require a lookahead larger than the buffer may result in an
// error. This is not something one may expect to be common in
// practice, but it may occur when buffers are set to small
// sizes during testing.
continue
}
case nil:
if w.n > 0 {
err = errInconsistentByteCount
}
}
return n, err
}
}
// Close implements the io.Closer interface.
func (w *Writer) Close() error {
src := w.src[:w.n]
for {
nDst, nSrc, err := w.t.Transform(w.dst, src, true)
if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
return werr
}
if err != ErrShortDst {
return err
}
src = src[nSrc:]
}
}
type nop struct{ NopResetter }
func (nop) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
n := copy(dst, src)
if n < len(src) {
err = ErrShortDst
}
return n, n, err
}
func (nop) Span(src []byte, atEOF bool) (n int, err error) {
return len(src), nil
}
type discard struct{ NopResetter }
func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
return 0, len(src), nil
}
var (
// Discard is a Transformer for which all Transform calls succeed
// by consuming all bytes and writing nothing.
Discard Transformer = discard{}
// Nop is a SpanningTransformer that copies src to dst.
Nop SpanningTransformer = nop{}
)
// chain is a sequence of links. A chain with N Transformers has N+1 links and
// N+1 buffers. Of those N+1 buffers, the first and last are the src and dst
// buffers given to chain.Transform and the middle N-1 buffers are intermediate
// buffers owned by the chain. The i'th link transforms bytes from the i'th
// buffer chain.link[i].b at read offset chain.link[i].p to the i+1'th buffer
// chain.link[i+1].b at write offset chain.link[i+1].n, for i in [0, N).
type chain struct {
link []link
err error
// errStart is the index at which the error occurred plus 1. Processing
// resumes at this level at the next call to Transform. As long as
// errStart > 0, chain will not consume any more source bytes.
errStart int
}
func (c *chain) fatalError(errIndex int, err error) {
if i := errIndex + 1; i > c.errStart {
c.errStart = i
c.err = err
}
}
type link struct {
t Transformer
// b[p:n] holds the bytes to be transformed by t.
b []byte
p int
n int
}
func (l *link) src() []byte {
return l.b[l.p:l.n]
}
func (l *link) dst() []byte {
return l.b[l.n:]
}
// Chain returns a Transformer that applies t in sequence.
func Chain(t ...Transformer) Transformer {
if len(t) == 0 {
return nop{}
}
c := &chain{link: make([]link, len(t)+1)}
for i, tt := range t {
c.link[i].t = tt
}
// Allocate intermediate buffers.
b := make([][defaultBufSize]byte, len(t)-1)
for i := range b {
c.link[i+1].b = b[i][:]
}
return c
}
// Reset resets the state of Chain. It calls Reset on all the Transformers.
func (c *chain) Reset() {
for i, l := range c.link {
if l.t != nil {
l.t.Reset()
}
c.link[i].p, c.link[i].n = 0, 0
}
}
// TODO: make chain use Span (is going to be fun to implement!)
// Transform applies the transformers of c in sequence.
func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
// Set up src and dst in the chain.
srcL := &c.link[0]
dstL := &c.link[len(c.link)-1]
srcL.b, srcL.p, srcL.n = src, 0, len(src)
dstL.b, dstL.n = dst, 0
var lastFull, needProgress bool // for detecting progress
// i is the index of the next Transformer to apply, for i in [low, high].
// low is the lowest index for which c.link[low] may still produce bytes.
// high is the highest index for which c.link[high] has a Transformer.
// The error returned by Transform determines whether to increase or
// decrease i. We try to completely fill a buffer before converting it.
for low, i, high := c.errStart, c.errStart, len(c.link)-2; low <= i && i <= high; {
in, out := &c.link[i], &c.link[i+1]
nDst, nSrc, err0 := in.t.Transform(out.dst(), in.src(), atEOF && low == i)
out.n += nDst
in.p += nSrc
if i > 0 && in.p == in.n {
in.p, in.n = 0, 0
}
needProgress, lastFull = lastFull, false
switch err0 {
case ErrShortDst:
// Process the destination buffer next. Return if we are already
// at the high index.
if i == high {
return dstL.n, srcL.p, ErrShortDst
}
if out.n != 0 {
i++
// If the Transformer at the next index is not able to process any
// source bytes there is nothing that can be done to make progress
// and the bytes will remain unprocessed. lastFull is used to
// detect this and break out of the loop with a fatal error.
lastFull = true
continue
}
// The destination buffer was too small, but is completely empty.
// Return a fatal error as this transformation can never complete.
c.fatalError(i, errShortInternal)
case ErrShortSrc:
if i == 0 {
// Save ErrShortSrc in err. All other errors take precedence.
err = ErrShortSrc
break
}
// Source bytes were depleted before filling up the destination buffer.
// Verify we made some progress, move the remaining bytes to the errStart
// and try to get more source bytes.
if needProgress && nSrc == 0 || in.n-in.p == len(in.b) {
// There were not enough source bytes to proceed while the source
// buffer cannot hold any more bytes. Return a fatal error as this
// transformation can never complete.
c.fatalError(i, errShortInternal)
break
}
// in.b is an internal buffer and we can make progress.
in.p, in.n = 0, copy(in.b, in.src())
fallthrough
case nil:
// if i == low, we have depleted the bytes at index i or any lower levels.
// In that case we increase low and i. In all other cases we decrease i to
// fetch more bytes before proceeding to the next index.
if i > low {
i--
continue
}
default:
c.fatalError(i, err0)
}
// Exhausted level low or fatal error: increase low and continue
// to process the bytes accepted so far.
i++
low = i
}
// If c.errStart > 0, this means we found a fatal error. We will clear
// all upstream buffers. At this point, no more progress can be made
// downstream, as Transform would have bailed while handling ErrShortDst.
if c.errStart > 0 {
for i := 1; i < c.errStart; i++ {
c.link[i].p, c.link[i].n = 0, 0
}
err, c.errStart, c.err = c.err, 0, nil
}
return dstL.n, srcL.p, err
}
// Deprecated: Use runes.Remove instead.
func RemoveFunc(f func(r rune) bool) Transformer {
return removeF(f)
}
type removeF func(r rune) bool
func (removeF) Reset() {}
// Transform implements the Transformer interface.
func (t removeF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
for r, sz := rune(0), 0; len(src) > 0; src = src[sz:] {
if r = rune(src[0]); r < utf8.RuneSelf {
sz = 1
} else {
r, sz = utf8.DecodeRune(src)
if sz == 1 {
// Invalid rune.
if !atEOF && !utf8.FullRune(src) {
err = ErrShortSrc
break
}
// We replace illegal bytes with RuneError. Not doing so might
// otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
// The resulting byte sequence may subsequently contain runes
// for which t(r) is true that were passed unnoticed.
if !t(r) {
if nDst+3 > len(dst) {
err = ErrShortDst
break
}
nDst += copy(dst[nDst:], "\uFFFD")
}
nSrc++
continue
}
}
if !t(r) {
if nDst+sz > len(dst) {
err = ErrShortDst
break
}
nDst += copy(dst[nDst:], src[:sz])
}
nSrc += sz
}
return
}
// grow returns a new []byte that is longer than b, and copies the first n bytes
// of b to the start of the new slice.
func grow(b []byte, n int) []byte {
m := len(b)
if m <= 32 {
m = 64
} else if m <= 256 {
m *= 2
} else {
m += m >> 1
}
buf := make([]byte, m)
copy(buf, b[:n])
return buf
}
const initialBufSize = 128
// String returns a string with the result of converting s[:n] using t, where
// n <= len(s). If err == nil, n will be len(s). It calls Reset on t.
func String(t Transformer, s string) (result string, n int, err error) {
t.Reset()
if s == "" {
// Fast path for the common case of empty input. Results in about an
// 86% reduction of running time for BenchmarkStringLowerEmpty.
if _, _, err := t.Transform(nil, nil, true); err == nil {
return "", 0, nil
}
}
// Allocate only once. Note that both dst and src escape when passed to
// Transform.
buf := [2 * initialBufSize]byte{}
dst := buf[:initialBufSize:initialBufSize]
src := buf[initialBufSize : 2*initialBufSize]
// The input string s is transformed in multiple chunks (starting with a
// chunk size of initialBufSize). nDst and nSrc are per-chunk (or
// per-Transform-call) indexes, pDst and pSrc are overall indexes.
nDst, nSrc := 0, 0
pDst, pSrc := 0, 0
// pPrefix is the length of a common prefix: the first pPrefix bytes of the
// result will equal the first pPrefix bytes of s. It is not guaranteed to
// be the largest such value, but if pPrefix, len(result) and len(s) are
// all equal after the final transform (i.e. calling Transform with atEOF
// being true returned nil error) then we don't need to allocate a new
// result string.
pPrefix := 0
for {
// Invariant: pDst == pPrefix && pSrc == pPrefix.
n := copy(src, s[pSrc:])
nDst, nSrc, err = t.Transform(dst, src[:n], pSrc+n == len(s))
pDst += nDst
pSrc += nSrc
// TODO: let transformers implement an optional Spanner interface, akin
// to norm's QuickSpan. This would even allow us to avoid any allocation.
if !bytes.Equal(dst[:nDst], src[:nSrc]) {
break
}
pPrefix = pSrc
if err == ErrShortDst {
// A buffer can only be short if a transformer modifies its input.
break
} else if err == ErrShortSrc {
if nSrc == 0 {
// No progress was made.
break
}
// Equal so far and !atEOF, so continue checking.
} else if err != nil || pPrefix == len(s) {
return string(s[:pPrefix]), pPrefix, err
}
}
// Post-condition: pDst == pPrefix + nDst && pSrc == pPrefix + nSrc.
// We have transformed the first pSrc bytes of the input s to become pDst
// transformed bytes. Those transformed bytes are discontiguous: the first
// pPrefix of them equal s[:pPrefix] and the last nDst of them equal
// dst[:nDst]. We copy them around, into a new dst buffer if necessary, so
// that they become one contiguous slice: dst[:pDst].
if pPrefix != 0 {
newDst := dst
if pDst > len(newDst) {
newDst = make([]byte, len(s)+nDst-nSrc)
}
copy(newDst[pPrefix:pDst], dst[:nDst])
copy(newDst[:pPrefix], s[:pPrefix])
dst = newDst
}
// Prevent duplicate Transform calls with atEOF being true at the end of
// the input. Also return if we have an unrecoverable error.
if (err == nil && pSrc == len(s)) ||
(err != nil && err != ErrShortDst && err != ErrShortSrc) {
return string(dst[:pDst]), pSrc, err
}
// Transform the remaining input, growing dst and src buffers as necessary.
for {
n := copy(src, s[pSrc:])
nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], pSrc+n == len(s))
pDst += nDst
pSrc += nSrc
// If we got ErrShortDst or ErrShortSrc, do not grow as long as we can
// make progress. This may avoid excessive allocations.
if err == ErrShortDst {
if nDst == 0 {
dst = grow(dst, pDst)
}
} else if err == ErrShortSrc {
if nSrc == 0 {
src = grow(src, 0)
}
} else if err != nil || pSrc == len(s) {
return string(dst[:pDst]), pSrc, err
}
}
}
// Bytes returns a new byte slice with the result of converting b[:n] using t,
// where n <= len(b). If err == nil, n will be len(b). It calls Reset on t.
func Bytes(t Transformer, b []byte) (result []byte, n int, err error) {
return doAppend(t, 0, make([]byte, len(b)), b)
}
// Append appends the result of converting src[:n] using t to dst, where
// n <= len(src). If err == nil, n will be len(src). It calls Reset on t.
func Append(t Transformer, dst, src []byte) (result []byte, n int, err error) {
if len(dst) == cap(dst) {
n := len(src) + len(dst) // It is okay for this to be 0.
b := make([]byte, n)
dst = b[:copy(b, dst)]
}
return doAppend(t, len(dst), dst[:cap(dst)], src)
}
func doAppend(t Transformer, pDst int, dst, src []byte) (result []byte, n int, err error) {
t.Reset()
pSrc := 0
for {
nDst, nSrc, err := t.Transform(dst[pDst:], src[pSrc:], true)
pDst += nDst
pSrc += nSrc
if err != ErrShortDst {
return dst[:pDst], pSrc, err
}
// Grow the destination buffer, but do not grow as long as we can make
// progress. This may avoid excessive allocations.
if nDst == 0 {
dst = grow(dst, pDst)
}
}
}
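
As a rough usage sketch (not part of the vendored code), the Chain, String, and NewReader helpers above compose as shown below. The chained transformers come from the runes package listed earlier, and the sample inputs are assumptions.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"
	"unicode"

	"golang.org/x/text/runes"
	"golang.org/x/text/transform"
)

func main() {
	// Chain applies its transformers in sequence: first repair ill-formed
	// UTF-8, then drop control characters.
	t := transform.Chain(
		runes.ReplaceIllFormed(),
		runes.Remove(runes.Predicate(unicode.IsControl)),
	)

	// transform.String converts a whole string in one call...
	s, _, err := transform.String(t, "hello\tworld")
	if err != nil {
		return
	}
	fmt.Println(s) // helloworld

	// ...while transform.NewReader transforms bytes as they are read.
	r := transform.NewReader(strings.NewReader("stream\x00data"), t)
	b, err := ioutil.ReadAll(r)
	if err != nil {
		return
	}
	fmt.Println(string(b)) // streamdata
}
```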

36
vendor/gopkg.in/asn1-ber.v1/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,36 @@
language: go
matrix:
include:
- go: 1.2.x
env: GOOS=linux GOARCH=amd64
- go: 1.2.x
env: GOOS=linux GOARCH=386
- go: 1.2.x
env: GOOS=windows GOARCH=amd64
- go: 1.2.x
env: GOOS=windows GOARCH=386
- go: 1.3.x
- go: 1.4.x
- go: 1.5.x
- go: 1.6.x
- go: 1.7.x
- go: 1.8.x
- go: 1.9.x
- go: 1.10.x
- go: 1.11.x
env: GOOS=linux GOARCH=amd64
- go: 1.11.x
env: GOOS=linux GOARCH=386
- go: 1.11.x
env: GOOS=windows GOARCH=amd64
- go: 1.11.x
env: GOOS=windows GOARCH=386
- go: tip
go_import_path: gopkg.in/asn1-ber.v1
install:
- go list -f '{{range .Imports}}{{.}} {{end}}' ./... | xargs go get -v
- go list -f '{{range .TestImports}}{{.}} {{end}}' ./... | xargs go get -v
- go get code.google.com/p/go.tools/cmd/cover || go get golang.org/x/tools/cmd/cover
- go build -v ./...
script:
- go test -v -cover ./... || go test -v ./...

22
vendor/gopkg.in/asn1-ber.v1/LICENSE generated vendored Normal file
View File

@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com)
Portions copyright (c) 2015-2016 go-asn1-ber Authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

24
vendor/gopkg.in/asn1-ber.v1/README.md generated vendored Normal file
View File

@ -0,0 +1,24 @@
[![GoDoc](https://godoc.org/gopkg.in/asn1-ber.v1?status.svg)](https://godoc.org/gopkg.in/asn1-ber.v1) [![Build Status](https://travis-ci.org/go-asn1-ber/asn1-ber.svg)](https://travis-ci.org/go-asn1-ber/asn1-ber)
ASN1 BER Encoding / Decoding Library for the Go programming language.
---------------------------------------------------------------------
Required libraries:
None
Working:
Very basic encoding / decoding needed for LDAP protocol
Tests Implemented:
A few
TODO:
Fix all encoding / decoding to conform to ASN1 BER spec
Implement Tests / Benchmarks
---
The Go gopher was designed by Renee French. (http://reneefrench.blogspot.com/)
The design is licensed under the Creative Commons 3.0 Attributions license.
Read this article for more details: http://blog.golang.org/gopher
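
The README is terse, so here is a hedged sketch of the decode path it alludes to: ReadPacket (defined in ber.go below) consumes one BER packet from any io.Reader. The raw bytes are an illustrative assumption; they encode the BER BOOLEAN value TRUE.

```go
package main

import (
	"bytes"
	"fmt"

	ber "gopkg.in/asn1-ber.v1"
)

func main() {
	// 0x01 = UNIVERSAL PRIMITIVE BOOLEAN, 0x01 = length 1, 0xFF = TRUE.
	raw := []byte{0x01, 0x01, 0xFF}

	p, err := ber.ReadPacket(bytes.NewReader(raw))
	if err != nil {
		return // handle the error
	}
	fmt.Println(p.Tag == ber.TagBoolean, p.Value) // true true
}
```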

512
vendor/gopkg.in/asn1-ber.v1/ber.go generated vendored Normal file
View File

@ -0,0 +1,512 @@
package ber
import (
"bytes"
"errors"
"fmt"
"io"
"math"
"os"
"reflect"
)
// MaxPacketLengthBytes specifies the maximum allowed packet size when calling ReadPacket or DecodePacket. Set to 0 for
// no limit.
var MaxPacketLengthBytes int64 = math.MaxInt32
type Packet struct {
Identifier
Value interface{}
ByteValue []byte
Data *bytes.Buffer
Children []*Packet
Description string
}
type Identifier struct {
ClassType Class
TagType Type
Tag Tag
}
type Tag uint64
const (
TagEOC Tag = 0x00
TagBoolean Tag = 0x01
TagInteger Tag = 0x02
TagBitString Tag = 0x03
TagOctetString Tag = 0x04
TagNULL Tag = 0x05
TagObjectIdentifier Tag = 0x06
TagObjectDescriptor Tag = 0x07
TagExternal Tag = 0x08
TagRealFloat Tag = 0x09
TagEnumerated Tag = 0x0a
TagEmbeddedPDV Tag = 0x0b
TagUTF8String Tag = 0x0c
TagRelativeOID Tag = 0x0d
TagSequence Tag = 0x10
TagSet Tag = 0x11
TagNumericString Tag = 0x12
TagPrintableString Tag = 0x13
TagT61String Tag = 0x14
TagVideotexString Tag = 0x15
TagIA5String Tag = 0x16
TagUTCTime Tag = 0x17
TagGeneralizedTime Tag = 0x18
TagGraphicString Tag = 0x19
TagVisibleString Tag = 0x1a
TagGeneralString Tag = 0x1b
TagUniversalString Tag = 0x1c
TagCharacterString Tag = 0x1d
TagBMPString Tag = 0x1e
TagBitmask Tag = 0x1f // xxx11111b
// HighTag indicates the start of a high-tag byte sequence
HighTag Tag = 0x1f // xxx11111b
// HighTagContinueBitmask indicates the high-tag byte sequence should continue
HighTagContinueBitmask Tag = 0x80 // 10000000b
// HighTagValueBitmask obtains the tag value from a high-tag byte sequence byte
HighTagValueBitmask Tag = 0x7f // 01111111b
)
const (
// LengthLongFormBitmask is the mask to apply to the length byte to see if a long-form byte sequence is used
LengthLongFormBitmask = 0x80
// LengthValueBitmask is the mask to apply to the length byte to get the number of bytes in the long-form byte sequence
LengthValueBitmask = 0x7f
// LengthIndefinite is returned from readLength to indicate an indefinite length
LengthIndefinite = -1
)
var tagMap = map[Tag]string{
TagEOC: "EOC (End-of-Content)",
TagBoolean: "Boolean",
TagInteger: "Integer",
TagBitString: "Bit String",
TagOctetString: "Octet String",
TagNULL: "NULL",
TagObjectIdentifier: "Object Identifier",
TagObjectDescriptor: "Object Descriptor",
TagExternal: "External",
TagRealFloat: "Real (float)",
TagEnumerated: "Enumerated",
TagEmbeddedPDV: "Embedded PDV",
TagUTF8String: "UTF8 String",
TagRelativeOID: "Relative-OID",
TagSequence: "Sequence and Sequence of",
TagSet: "Set and Set OF",
TagNumericString: "Numeric String",
TagPrintableString: "Printable String",
TagT61String: "T61 String",
TagVideotexString: "Videotex String",
TagIA5String: "IA5 String",
TagUTCTime: "UTC Time",
TagGeneralizedTime: "Generalized Time",
TagGraphicString: "Graphic String",
TagVisibleString: "Visible String",
TagGeneralString: "General String",
TagUniversalString: "Universal String",
TagCharacterString: "Character String",
TagBMPString: "BMP String",
}
type Class uint8
const (
ClassUniversal Class = 0 // 00xxxxxxb
ClassApplication Class = 64 // 01xxxxxxb
ClassContext Class = 128 // 10xxxxxxb
ClassPrivate Class = 192 // 11xxxxxxb
ClassBitmask Class = 192 // 11xxxxxxb
)
var ClassMap = map[Class]string{
ClassUniversal: "Universal",
ClassApplication: "Application",
ClassContext: "Context",
ClassPrivate: "Private",
}
type Type uint8
const (
TypePrimitive Type = 0 // xx0xxxxxb
TypeConstructed Type = 32 // xx1xxxxxb
TypeBitmask Type = 32 // xx1xxxxxb
)
var TypeMap = map[Type]string{
TypePrimitive: "Primitive",
TypeConstructed: "Constructed",
}
var Debug bool = false
func PrintBytes(out io.Writer, buf []byte, indent string) {
data_lines := make([]string, (len(buf)/30)+1)
num_lines := make([]string, (len(buf)/30)+1)
for i, b := range buf {
data_lines[i/30] += fmt.Sprintf("%02x ", b)
num_lines[i/30] += fmt.Sprintf("%02d ", (i+1)%100)
}
for i := 0; i < len(data_lines); i++ {
out.Write([]byte(indent + data_lines[i] + "\n"))
out.Write([]byte(indent + num_lines[i] + "\n\n"))
}
}
func PrintPacket(p *Packet) {
printPacket(os.Stdout, p, 0, false)
}
func printPacket(out io.Writer, p *Packet, indent int, printBytes bool) {
indent_str := ""
for len(indent_str) != indent {
indent_str += " "
}
class_str := ClassMap[p.ClassType]
tagtype_str := TypeMap[p.TagType]
tag_str := fmt.Sprintf("0x%02X", p.Tag)
if p.ClassType == ClassUniversal {
tag_str = tagMap[p.Tag]
}
value := fmt.Sprint(p.Value)
description := ""
if p.Description != "" {
description = p.Description + ": "
}
fmt.Fprintf(out, "%s%s(%s, %s, %s) Len=%d %q\n", indent_str, description, class_str, tagtype_str, tag_str, p.Data.Len(), value)
if printBytes {
PrintBytes(out, p.Bytes(), indent_str)
}
for _, child := range p.Children {
printPacket(out, child, indent+1, printBytes)
}
}
// ReadPacket reads a single Packet from the reader
func ReadPacket(reader io.Reader) (*Packet, error) {
p, _, err := readPacket(reader)
if err != nil {
return nil, err
}
return p, nil
}
func DecodeString(data []byte) string {
return string(data)
}
func ParseInt64(bytes []byte) (ret int64, err error) {
if len(bytes) > 8 {
// We'll overflow an int64 in this case.
err = fmt.Errorf("integer too large")
return
}
for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
ret <<= 8
ret |= int64(bytes[bytesRead])
}
// Shift up and down in order to sign extend the result.
ret <<= 64 - uint8(len(bytes))*8
ret >>= 64 - uint8(len(bytes))*8
return
}
func encodeInteger(i int64) []byte {
n := int64Length(i)
out := make([]byte, n)
var j int
for ; n > 0; n-- {
out[j] = (byte(i >> uint((n-1)*8)))
j++
}
return out
}
func int64Length(i int64) (numBytes int) {
numBytes = 1
for i > 127 {
numBytes++
i >>= 8
}
for i < -128 {
numBytes++
i >>= 8
}
return
}
// DecodePacket decodes the given bytes into a single Packet
// If a decode error is encountered, nil is returned.
func DecodePacket(data []byte) *Packet {
p, _, _ := readPacket(bytes.NewBuffer(data))
return p
}
// DecodePacketErr decodes the given bytes into a single Packet
// If a decode error is encountered, nil and the error are returned
func DecodePacketErr(data []byte) (*Packet, error) {
p, _, err := readPacket(bytes.NewBuffer(data))
if err != nil {
return nil, err
}
return p, nil
}
// readPacket reads a single Packet from the reader, returning the number of bytes read
func readPacket(reader io.Reader) (*Packet, int, error) {
identifier, length, read, err := readHeader(reader)
if err != nil {
return nil, read, err
}
p := &Packet{
Identifier: identifier,
}
p.Data = new(bytes.Buffer)
p.Children = make([]*Packet, 0, 2)
p.Value = nil
if p.TagType == TypeConstructed {
// TODO: if universal, ensure tag type is allowed to be constructed
// Track how much content we've read
contentRead := 0
for {
if length != LengthIndefinite {
// End if we've read what we've been told to
if contentRead == length {
break
}
// Detect if a packet boundary didn't fall on the expected length
if contentRead > length {
return nil, read, fmt.Errorf("expected to read %d bytes, read %d", length, contentRead)
}
}
// Read the next packet
child, r, err := readPacket(reader)
if err != nil {
return nil, read, err
}
contentRead += r
read += r
// Test if this is the EOC marker for our packet
if isEOCPacket(child) {
if length == LengthIndefinite {
break
}
return nil, read, errors.New("eoc child not allowed with definite length")
}
// Append and continue
p.AppendChild(child)
}
return p, read, nil
}
if length == LengthIndefinite {
return nil, read, errors.New("indefinite length used with primitive type")
}
// Read definite-length content
if MaxPacketLengthBytes > 0 && int64(length) > MaxPacketLengthBytes {
return nil, read, fmt.Errorf("length %d greater than maximum %d", length, MaxPacketLengthBytes)
}
content := make([]byte, length, length)
if length > 0 {
_, err := io.ReadFull(reader, content)
if err != nil {
if err == io.EOF {
return nil, read, io.ErrUnexpectedEOF
}
return nil, read, err
}
read += length
}
if p.ClassType == ClassUniversal {
p.Data.Write(content)
p.ByteValue = content
switch p.Tag {
case TagEOC:
case TagBoolean:
val, _ := ParseInt64(content)
p.Value = val != 0
case TagInteger:
p.Value, _ = ParseInt64(content)
case TagBitString:
case TagOctetString:
// the actual string encoding is not known here
// (e.g. for LDAP content is already an UTF8-encoded
// string). Return the data without further processing
p.Value = DecodeString(content)
case TagNULL:
case TagObjectIdentifier:
case TagObjectDescriptor:
case TagExternal:
case TagRealFloat:
case TagEnumerated:
p.Value, _ = ParseInt64(content)
case TagEmbeddedPDV:
case TagUTF8String:
p.Value = DecodeString(content)
case TagRelativeOID:
case TagSequence:
case TagSet:
case TagNumericString:
case TagPrintableString:
p.Value = DecodeString(content)
case TagT61String:
case TagVideotexString:
case TagIA5String:
case TagUTCTime:
case TagGeneralizedTime:
case TagGraphicString:
case TagVisibleString:
case TagGeneralString:
case TagUniversalString:
case TagCharacterString:
case TagBMPString:
}
} else {
p.Data.Write(content)
}
return p, read, nil
}
func (p *Packet) Bytes() []byte {
var out bytes.Buffer
out.Write(encodeIdentifier(p.Identifier))
out.Write(encodeLength(p.Data.Len()))
out.Write(p.Data.Bytes())
return out.Bytes()
}
func (p *Packet) AppendChild(child *Packet) {
p.Data.Write(child.Bytes())
p.Children = append(p.Children, child)
}
func Encode(ClassType Class, TagType Type, Tag Tag, Value interface{}, Description string) *Packet {
p := new(Packet)
p.ClassType = ClassType
p.TagType = TagType
p.Tag = Tag
p.Data = new(bytes.Buffer)
p.Children = make([]*Packet, 0, 2)
p.Value = Value
p.Description = Description
if Value != nil {
v := reflect.ValueOf(Value)
if ClassType == ClassUniversal {
switch Tag {
case TagOctetString:
sv, ok := v.Interface().(string)
if ok {
p.Data.Write([]byte(sv))
}
}
}
}
return p
}
func NewSequence(Description string) *Packet {
return Encode(ClassUniversal, TypeConstructed, TagSequence, nil, Description)
}
func NewBoolean(ClassType Class, TagType Type, Tag Tag, Value bool, Description string) *Packet {
intValue := int64(0)
if Value {
intValue = 1
}
p := Encode(ClassType, TagType, Tag, nil, Description)
p.Value = Value
p.Data.Write(encodeInteger(intValue))
return p
}
func NewInteger(ClassType Class, TagType Type, Tag Tag, Value interface{}, Description string) *Packet {
p := Encode(ClassType, TagType, Tag, nil, Description)
p.Value = Value
switch v := Value.(type) {
case int:
p.Data.Write(encodeInteger(int64(v)))
case uint:
p.Data.Write(encodeInteger(int64(v)))
case int64:
p.Data.Write(encodeInteger(v))
case uint64:
// TODO : check range or add encodeUInt...
p.Data.Write(encodeInteger(int64(v)))
case int32:
p.Data.Write(encodeInteger(int64(v)))
case uint32:
p.Data.Write(encodeInteger(int64(v)))
case int16:
p.Data.Write(encodeInteger(int64(v)))
case uint16:
p.Data.Write(encodeInteger(int64(v)))
case int8:
p.Data.Write(encodeInteger(int64(v)))
case uint8:
p.Data.Write(encodeInteger(int64(v)))
default:
// TODO : add support for big.Int ?
panic(fmt.Sprintf("Invalid type %T, expected {u|}int{64|32|16|8}", v))
}
return p
}
func NewString(ClassType Class, TagType Type, Tag Tag, Value, Description string) *Packet {
p := Encode(ClassType, TagType, Tag, nil, Description)
p.Value = Value
p.Data.Write([]byte(Value))
return p
}
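
To tie the constructors above together, a small encode/decode round trip. This is illustrative only; the field values and descriptions are assumptions, and every call (NewSequence, NewString, NewInteger, AppendChild, Bytes, DecodePacket) is defined in the file listed above.

```go
package main

import (
	"fmt"

	ber "gopkg.in/asn1-ber.v1"
)

func main() {
	// Build SEQUENCE { OCTET STRING "hello", INTEGER 42 }.
	seq := ber.NewSequence("request")
	seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "hello", "greeting"))
	seq.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, int64(42), "answer"))

	// Serialize and decode again.
	decoded := ber.DecodePacket(seq.Bytes())
	if decoded == nil {
		return // DecodePacket returns nil on decode errors
	}
	fmt.Println(decoded.Children[0].Value) // hello
	fmt.Println(decoded.Children[1].Value) // 42
}
```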

25
vendor/gopkg.in/asn1-ber.v1/content_int.go generated vendored Normal file
View File

@ -0,0 +1,25 @@
package ber
func encodeUnsignedInteger(i uint64) []byte {
n := uint64Length(i)
out := make([]byte, n)
var j int
for ; n > 0; n-- {
out[j] = (byte(i >> uint((n-1)*8)))
j++
}
return out
}
func uint64Length(i uint64) (numBytes int) {
numBytes = 1
for i > 255 {
numBytes++
i >>= 8
}
return
}
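
A worked example of the minimal big-endian byte sequences encodeUnsignedInteger produces, derived from the code above. Since the function is unexported, this is written as a hypothetical helper that would live inside package ber; it is not part of the vendored file.

```go
package ber

import "bytes"

// sketchUnsignedEncoding is illustrative only: encodeUnsignedInteger emits
// the shortest big-endian representation of its argument.
func sketchUnsignedEncoding() bool {
	return bytes.Equal(encodeUnsignedInteger(0), []byte{0x00}) &&
		bytes.Equal(encodeUnsignedInteger(300), []byte{0x01, 0x2C}) &&
		bytes.Equal(encodeUnsignedInteger(65536), []byte{0x01, 0x00, 0x00})
}
```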

35
vendor/gopkg.in/asn1-ber.v1/header.go generated vendored Normal file
View File

@ -0,0 +1,35 @@
package ber
import (
"errors"
"fmt"
"io"
)
func readHeader(reader io.Reader) (identifier Identifier, length int, read int, err error) {
if i, c, err := readIdentifier(reader); err != nil {
return Identifier{}, 0, read, err
} else {
identifier = i
read += c
}
if l, c, err := readLength(reader); err != nil {
return Identifier{}, 0, read, err
} else {
length = l
read += c
}
// Validate length type with identifier (x.690, 8.1.3.2.a)
if length == LengthIndefinite && identifier.TagType == TypePrimitive {
return Identifier{}, 0, read, errors.New("indefinite length used with primitive type")
}
if length < LengthIndefinite {
err = fmt.Errorf("length cannot be less than %d", LengthIndefinite)
return
}
return identifier, length, read, nil
}

112
vendor/gopkg.in/asn1-ber.v1/identifier.go generated vendored Normal file
View File

@ -0,0 +1,112 @@
package ber
import (
"errors"
"fmt"
"io"
)
func readIdentifier(reader io.Reader) (Identifier, int, error) {
identifier := Identifier{}
read := 0
// identifier byte
b, err := readByte(reader)
if err != nil {
if Debug {
fmt.Printf("error reading identifier byte: %v\n", err)
}
return Identifier{}, read, err
}
read++
identifier.ClassType = Class(b) & ClassBitmask
identifier.TagType = Type(b) & TypeBitmask
if tag := Tag(b) & TagBitmask; tag != HighTag {
// short-form tag
identifier.Tag = tag
return identifier, read, nil
}
// high-tag-number tag
tagBytes := 0
for {
b, err := readByte(reader)
if err != nil {
if Debug {
fmt.Printf("error reading high-tag-number tag byte %d: %v\n", tagBytes, err)
}
return Identifier{}, read, err
}
tagBytes++
read++
// Lowest 7 bits get appended to the tag value (x.690, 8.1.2.4.2.b)
identifier.Tag <<= 7
identifier.Tag |= Tag(b) & HighTagValueBitmask
// First byte may not be all zeros (x.690, 8.1.2.4.2.c)
if tagBytes == 1 && identifier.Tag == 0 {
return Identifier{}, read, errors.New("invalid first high-tag-number tag byte")
}
// Overflow of int64
// TODO: support big int tags?
if tagBytes > 9 {
return Identifier{}, read, errors.New("high-tag-number tag overflow")
}
// Top bit of 0 means this is the last byte in the high-tag-number tag (x.690, 8.1.2.4.2.a)
if Tag(b)&HighTagContinueBitmask == 0 {
break
}
}
return identifier, read, nil
}
func encodeIdentifier(identifier Identifier) []byte {
b := []byte{0x0}
b[0] |= byte(identifier.ClassType)
b[0] |= byte(identifier.TagType)
if identifier.Tag < HighTag {
// Short-form
b[0] |= byte(identifier.Tag)
} else {
// high-tag-number
b[0] |= byte(HighTag)
tag := identifier.Tag
b = append(b, encodeHighTag(tag)...)
}
return b
}
func encodeHighTag(tag Tag) []byte {
// set cap=4 to hopefully avoid additional allocations
b := make([]byte, 0, 4)
for tag != 0 {
// t := last 7 bits of tag (HighTagValueBitmask = 0x7F)
t := tag & HighTagValueBitmask
// right shift tag 7 to remove what was just pulled off
tag >>= 7
// if b already has entries this entry needs a continuation bit (0x80)
if len(b) != 0 {
t |= HighTagContinueBitmask
}
b = append(b, byte(t))
}
// reverse
// since bits were pulled off 'tag' from low to high, the byte slice is in reverse order.
// example: tag = 0xFF results in {0x7F, 0x01 + 0x80 (continuation bit)}
// this needs to be reversed into 0x81 0x7F
for i, j := 0, len(b)-1; i < len(b)/2; i++ {
b[i], b[j-i] = b[j-i], b[i]
}
return b
}

81
vendor/gopkg.in/asn1-ber.v1/length.go generated vendored Normal file
View File

@ -0,0 +1,81 @@
package ber
import (
"errors"
"fmt"
"io"
)
func readLength(reader io.Reader) (length int, read int, err error) {
// length byte
b, err := readByte(reader)
if err != nil {
if Debug {
fmt.Printf("error reading length byte: %v\n", err)
}
return 0, 0, err
}
read++
switch {
case b == 0xFF:
// Invalid 0xFF (x.690, 8.1.3.5.c)
return 0, read, errors.New("invalid length byte 0xff")
case b == LengthLongFormBitmask:
// Indefinite form, we have to decode packets until we encounter an EOC packet (x.690, 8.1.3.6)
length = LengthIndefinite
case b&LengthLongFormBitmask == 0:
// Short definite form, extract the length from the bottom 7 bits (x.690, 8.1.3.4)
length = int(b) & LengthValueBitmask
case b&LengthLongFormBitmask != 0:
// Long definite form, extract the number of length bytes to follow from the bottom 7 bits (x.690, 8.1.3.5.b)
lengthBytes := int(b) & LengthValueBitmask
// Protect against overflow
// TODO: support big int length?
if lengthBytes > 8 {
return 0, read, errors.New("long-form length overflow")
}
// Accumulate into a 64-bit variable
var length64 int64
for i := 0; i < lengthBytes; i++ {
b, err = readByte(reader)
if err != nil {
if Debug {
fmt.Printf("error reading long-form length byte %d: %v\n", i, err)
}
return 0, read, err
}
read++
// x.690, 8.1.3.5
length64 <<= 8
length64 |= int64(b)
}
// Cast to a platform-specific integer
length = int(length64)
// Ensure we didn't overflow
if int64(length) != length64 {
return 0, read, errors.New("long-form length overflow")
}
default:
return 0, read, errors.New("invalid length byte")
}
return length, read, nil
}
func encodeLength(length int) []byte {
length_bytes := encodeUnsignedInteger(uint64(length))
if length > 127 || len(length_bytes) > 1 {
longFormBytes := []byte{(LengthLongFormBitmask | byte(len(length_bytes)))}
longFormBytes = append(longFormBytes, length_bytes...)
length_bytes = longFormBytes
}
return length_bytes
}
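
A worked example of the short and long length forms handled above: lengths up to 127 fit in a single short-form byte, larger values use a length-of-length prefix byte (0x80 | number of value bytes). As encodeLength is unexported, this is a hypothetical in-package helper, not part of the vendored file.

```go
package ber

import "bytes"

// sketchLengthForms is illustrative only: 127 stays in the short form,
// 300 becomes 0x82 (long form, 2 value bytes) followed by 0x01 0x2C.
func sketchLengthForms() bool {
	return bytes.Equal(encodeLength(127), []byte{0x7F}) &&
		bytes.Equal(encodeLength(300), []byte{0x82, 0x01, 0x2C})
}
```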

24
vendor/gopkg.in/asn1-ber.v1/util.go generated vendored Normal file
View File

@ -0,0 +1,24 @@
package ber
import "io"
func readByte(reader io.Reader) (byte, error) {
bytes := make([]byte, 1, 1)
_, err := io.ReadFull(reader, bytes)
if err != nil {
if err == io.EOF {
return 0, io.ErrUnexpectedEOF
}
return 0, err
}
return bytes[0], nil
}
func isEOCPacket(p *Packet) bool {
return p != nil &&
p.Tag == TagEOC &&
p.ClassType == ClassUniversal &&
p.TagType == TypePrimitive &&
len(p.ByteValue) == 0 &&
len(p.Children) == 0
}

6
vendor/gopkg.in/ini.v1/.gitignore generated vendored Normal file
View File

@ -0,0 +1,6 @@
testdata/conf_out.ini
ini.sublime-project
ini.sublime-workspace
testdata/conf_reflect.ini
.idea
/.vscode

20
vendor/gopkg.in/ini.v1/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,20 @@
sudo: false
language: go
go:
- 1.6.x
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
- 1.12.x
- 1.13.x
install: skip
script:
- go get golang.org/x/tools/cmd/cover
- go get github.com/smartystreets/goconvey
- mkdir -p $HOME/gopath/src/gopkg.in
- ln -s $HOME/gopath/src/github.com/go-ini/ini $HOME/gopath/src/gopkg.in/ini.v1
- cd $HOME/gopath/src/gopkg.in/ini.v1
- go test -v -cover -race

191
vendor/gopkg.in/ini.v1/LICENSE generated vendored Normal file
View File

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright 2014 Unknwon
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

15
vendor/gopkg.in/ini.v1/Makefile generated vendored Normal file
View File

@ -0,0 +1,15 @@
.PHONY: build test bench vet coverage
build: vet bench
test:
go test -v -cover -race
bench:
go test -v -cover -race -test.bench=. -test.benchmem
vet:
go vet
coverage:
go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out

39
vendor/gopkg.in/ini.v1/README.md generated vendored Normal file
View File

@ -0,0 +1,39 @@
# INI
[![Build Status](https://img.shields.io/travis/go-ini/ini/master.svg?style=for-the-badge&logo=travis)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini)
![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
Package ini provides INI file read and write functionality in Go.
## Features
- Load from multiple data sources (`[]byte`, file and `io.ReadCloser`) with overwrites.
- Read with recursive values.
- Read with parent-child sections.
- Read with auto-increment key names.
- Read with multiple-line values.
- Read with tons of helper methods.
- Read and convert values to Go types.
- Read and **WRITE** comments of sections and keys.
- Manipulate sections, keys and comments with ease.
- Keep sections and keys in order as you parse and save.
## Installation
The minimum required version of Go is **1.6**.
```sh
$ go get gopkg.in/ini.v1
```
Add the `-u` flag to update the package in the future.
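A minimal usage sketch follows; the `my.ini` file, the `app_mode` key, and the `[paths]` section are illustrative placeholders, not part of this repository:

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	// Load accepts file names (string), raw data ([]byte) or io.ReadCloser values.
	cfg, err := ini.Load("my.ini")
	if err != nil {
		log.Fatalf("failed to read file: %v", err)
	}

	// Read a key from the default (unnamed) section and from a named section.
	fmt.Println("App mode:", cfg.Section("").Key("app_mode").String())
	fmt.Println("Data path:", cfg.Section("paths").Key("data").MustString("/tmp"))

	// Change a value and write the result back out.
	cfg.Section("paths").Key("data").SetValue("/srv/data")
	if err := cfg.SaveTo("my.ini.local"); err != nil {
		log.Fatalf("failed to save file: %v", err)
	}
}
```

See [Getting Started](https://ini.unknwon.io/docs/intro/getting_started) (linked below) for a full walkthrough.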
## Getting Help
- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started)
- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
## License
This project is under the Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.

74
vendor/gopkg.in/ini.v1/data_source.go generated vendored Normal file
View File

@ -0,0 +1,74 @@
// Copyright 2019 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
)
var (
_ dataSource = (*sourceFile)(nil)
_ dataSource = (*sourceData)(nil)
_ dataSource = (*sourceReadCloser)(nil)
)
// dataSource is an interface that returns an object which can be read and closed.
type dataSource interface {
ReadCloser() (io.ReadCloser, error)
}
// sourceFile represents an object that contains content on the local file system.
type sourceFile struct {
name string
}
func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
return os.Open(s.name)
}
// sourceData represents an object that contains content in memory.
type sourceData struct {
data []byte
}
func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(s.data)), nil
}
// sourceReadCloser represents an input stream with Close method.
type sourceReadCloser struct {
reader io.ReadCloser
}
func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
return s.reader, nil
}
func parseDataSource(source interface{}) (dataSource, error) {
switch s := source.(type) {
case string:
return sourceFile{s}, nil
case []byte:
return &sourceData{s}, nil
case io.ReadCloser:
return &sourceReadCloser{s}, nil
default:
return nil, fmt.Errorf("error parsing data source: unknown type %q", s)
}
}

25
vendor/gopkg.in/ini.v1/deprecated.go generated vendored Normal file
View File

@ -0,0 +1,25 @@
// Copyright 2019 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
const (
// Deprecated: Use "DefaultSection" instead.
DEFAULT_SECTION = DefaultSection
)
var (
// Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
AllCapsUnderscore = SnackCase
)

34
vendor/gopkg.in/ini.v1/error.go generated vendored Normal file
View File

@ -0,0 +1,34 @@
// Copyright 2016 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"fmt"
)
// ErrDelimiterNotFound indicates that no key-value delimiter was found where one is expected.
type ErrDelimiterNotFound struct {
Line string
}
// IsErrDelimiterNotFound returns true if the given error is an instance of ErrDelimiterNotFound.
func IsErrDelimiterNotFound(err error) bool {
_, ok := err.(ErrDelimiterNotFound)
return ok
}
func (err ErrDelimiterNotFound) Error() string {
return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
}

418
vendor/gopkg.in/ini.v1/file.go generated vendored Normal file
View File

@ -0,0 +1,418 @@
// Copyright 2017 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"sync"
)
// File represents a combination of one or more INI files in memory.
type File struct {
options LoadOptions
dataSources []dataSource
// BlockMode indicates whether to guard access with a lock; it can be disabled when concurrency safety doesn't matter.
BlockMode bool
lock sync.RWMutex
// To keep data in order.
sectionList []string
// Actual data is stored here.
sections map[string]*Section
NameMapper
ValueMapper
}
// newFile initializes File object with given data sources.
func newFile(dataSources []dataSource, opts LoadOptions) *File {
if len(opts.KeyValueDelimiters) == 0 {
opts.KeyValueDelimiters = "=:"
}
return &File{
BlockMode: true,
dataSources: dataSources,
sections: make(map[string]*Section),
sectionList: make([]string, 0, 10),
options: opts,
}
}
// Empty returns an empty file object.
func Empty() *File {
// Ignore the error here; we are sure our data is good.
f, _ := Load([]byte(""))
return f
}
// NewSection creates a new section.
func (f *File) NewSection(name string) (*Section, error) {
if len(name) == 0 {
return nil, errors.New("error creating new section: empty section name")
} else if f.options.Insensitive && name != DefaultSection {
name = strings.ToLower(name)
}
if f.BlockMode {
f.lock.Lock()
defer f.lock.Unlock()
}
if inSlice(name, f.sectionList) {
return f.sections[name], nil
}
f.sectionList = append(f.sectionList, name)
f.sections[name] = newSection(f, name)
return f.sections[name], nil
}
// NewRawSection creates a new section with an unparseable body.
func (f *File) NewRawSection(name, body string) (*Section, error) {
section, err := f.NewSection(name)
if err != nil {
return nil, err
}
section.isRawSection = true
section.rawBody = body
return section, nil
}
// NewSections creates a list of sections.
func (f *File) NewSections(names ...string) (err error) {
for _, name := range names {
if _, err = f.NewSection(name); err != nil {
return err
}
}
return nil
}
// GetSection returns section by given name.
func (f *File) GetSection(name string) (*Section, error) {
if len(name) == 0 {
name = DefaultSection
}
if f.options.Insensitive {
name = strings.ToLower(name)
}
if f.BlockMode {
f.lock.RLock()
defer f.lock.RUnlock()
}
sec := f.sections[name]
if sec == nil {
return nil, fmt.Errorf("section '%s' does not exist", name)
}
return sec, nil
}
// Section returns the named section, creating an empty one when it does not exist.
func (f *File) Section(name string) *Section {
sec, err := f.GetSection(name)
if err != nil {
// Note: It's OK here because the only possible error is empty section name,
// but if it's empty, this piece of code won't be executed.
sec, _ = f.NewSection(name)
return sec
}
return sec
}
// Sections returns a list of Section stored in the current instance.
func (f *File) Sections() []*Section {
if f.BlockMode {
f.lock.RLock()
defer f.lock.RUnlock()
}
sections := make([]*Section, len(f.sectionList))
for i, name := range f.sectionList {
sections[i] = f.sections[name]
}
return sections
}
// ChildSections returns a list of child sections of given section name.
func (f *File) ChildSections(name string) []*Section {
return f.Section(name).ChildSections()
}
// SectionStrings returns list of section names.
func (f *File) SectionStrings() []string {
list := make([]string, len(f.sectionList))
copy(list, f.sectionList)
return list
}
// DeleteSection deletes a section.
func (f *File) DeleteSection(name string) {
if f.BlockMode {
f.lock.Lock()
defer f.lock.Unlock()
}
if len(name) == 0 {
name = DefaultSection
}
for i, s := range f.sectionList {
if s == name {
f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
delete(f.sections, name)
return
}
}
}
func (f *File) reload(s dataSource) error {
r, err := s.ReadCloser()
if err != nil {
return err
}
defer r.Close()
return f.parse(r)
}
// Reload reloads and parses all data sources.
func (f *File) Reload() (err error) {
for _, s := range f.dataSources {
if err = f.reload(s); err != nil {
// In loose mode, we create an empty default section for nonexistent files.
if os.IsNotExist(err) && f.options.Loose {
f.parse(bytes.NewBuffer(nil))
continue
}
return err
}
}
return nil
}
// Append appends one or more data sources and reloads automatically.
func (f *File) Append(source interface{}, others ...interface{}) error {
ds, err := parseDataSource(source)
if err != nil {
return err
}
f.dataSources = append(f.dataSources, ds)
for _, s := range others {
ds, err = parseDataSource(s)
if err != nil {
return err
}
f.dataSources = append(f.dataSources, ds)
}
return f.Reload()
}
func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
equalSign := DefaultFormatLeft + "=" + DefaultFormatRight
if PrettyFormat || PrettyEqual {
equalSign = " = "
}
// Use a buffer to make sure the target is untouched until encoding finishes.
buf := bytes.NewBuffer(nil)
for i, sname := range f.sectionList {
sec := f.Section(sname)
if len(sec.Comment) > 0 {
// Support multiline comments
lines := strings.Split(sec.Comment, LineBreak)
for i := range lines {
if lines[i][0] != '#' && lines[i][0] != ';' {
lines[i] = "; " + lines[i]
} else {
lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
}
if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
return nil, err
}
}
}
if i > 0 || DefaultHeader {
if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
return nil, err
}
} else {
// Write nothing if default section is empty
if len(sec.keyList) == 0 {
continue
}
}
if sec.isRawSection {
if _, err := buf.WriteString(sec.rawBody); err != nil {
return nil, err
}
if PrettySection {
// Put a line between sections
if _, err := buf.WriteString(LineBreak); err != nil {
return nil, err
}
}
continue
}
// Count and generate alignment length and buffer spaces using the
// longest key. Keys may be modified if they contain certain characters, so
// we need to take that into account in our calculation.
alignLength := 0
if PrettyFormat {
for _, kname := range sec.keyList {
keyLength := len(kname)
// First case will surround key by ` and second by """
if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) {
keyLength += 2
} else if strings.Contains(kname, "`") {
keyLength += 6
}
if keyLength > alignLength {
alignLength = keyLength
}
}
}
alignSpaces := bytes.Repeat([]byte(" "), alignLength)
KeyList:
for _, kname := range sec.keyList {
key := sec.Key(kname)
if len(key.Comment) > 0 {
if len(indent) > 0 && sname != DefaultSection {
buf.WriteString(indent)
}
// Support multiline comments
lines := strings.Split(key.Comment, LineBreak)
for i := range lines {
if lines[i][0] != '#' && lines[i][0] != ';' {
lines[i] = "; " + strings.TrimSpace(lines[i])
} else {
lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
}
if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
return nil, err
}
}
}
if len(indent) > 0 && sname != DefaultSection {
buf.WriteString(indent)
}
switch {
case key.isAutoIncrement:
kname = "-"
case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters):
kname = "`" + kname + "`"
case strings.Contains(kname, "`"):
kname = `"""` + kname + `"""`
}
for _, val := range key.ValueWithShadows() {
if _, err := buf.WriteString(kname); err != nil {
return nil, err
}
if key.isBooleanType {
if kname != sec.keyList[len(sec.keyList)-1] {
buf.WriteString(LineBreak)
}
continue KeyList
}
// Write out alignment spaces before "=" sign
if PrettyFormat {
buf.Write(alignSpaces[:alignLength-len(kname)])
}
// In case key value contains "\n", "`", "\"", "#" or ";"
if strings.ContainsAny(val, "\n`") {
val = `"""` + val + `"""`
} else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
val = "`" + val + "`"
}
if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil {
return nil, err
}
}
for _, val := range key.nestedValues {
if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil {
return nil, err
}
}
}
if PrettySection {
// Put a line between sections
if _, err := buf.WriteString(LineBreak); err != nil {
return nil, err
}
}
}
return buf, nil
}
// WriteToIndent writes content into io.Writer with the given indentation.
// If PrettyFormat has been set to be true,
// it will align "=" sign with spaces under each section.
func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) {
buf, err := f.writeToBuffer(indent)
if err != nil {
return 0, err
}
return buf.WriteTo(w)
}
// WriteTo writes file content into io.Writer.
func (f *File) WriteTo(w io.Writer) (int64, error) {
return f.WriteToIndent(w, "")
}
// SaveToIndent writes content to the file system with the given value indentation.
func (f *File) SaveToIndent(filename, indent string) error {
// Note: because we are truncating with os.Create,
// it is safer to save to a temporary file location and rename after we are done.
buf, err := f.writeToBuffer(indent)
if err != nil {
return err
}
return ioutil.WriteFile(filename, buf.Bytes(), 0666)
}
// SaveTo writes content to file system.
func (f *File) SaveTo(filename string) error {
return f.SaveToIndent(filename, "")
}

24
vendor/gopkg.in/ini.v1/helper.go generated vendored Normal file
View File

@ -0,0 +1,24 @@
// Copyright 2019 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
func inSlice(str string, s []string) bool {
for _, v := range s {
if str == v {
return true
}
}
return false
}

166
vendor/gopkg.in/ini.v1/ini.go generated vendored Normal file
View File

@ -0,0 +1,166 @@
// +build go1.6
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
// Package ini provides INI file read and write functionality in Go.
package ini
import (
"regexp"
"runtime"
)
const (
// DefaultSection is the name of default section. You can use this constant or the string literal.
// In most cases, an empty string is all you need to access the section.
DefaultSection = "DEFAULT"
// Maximum allowed depth when recursively substituting variable names.
depthValues = 99
version = "1.52.0"
)
// Version returns current package version literal.
func Version() string {
return version
}
var (
// LineBreak is the delimiter to determine or compose a new line.
// This variable will be changed to "\r\n" automatically on Windows at package init time.
LineBreak = "\n"
// Variable regexp pattern: %(variable)s
varPattern = regexp.MustCompile(`%\(([^)]+)\)s`)
// DefaultHeader explicitly writes default section header.
DefaultHeader = false
// PrettySection indicates whether to put a line between sections.
PrettySection = true
// PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output
// or reduce all possible spaces for compact format.
PrettyFormat = true
// PrettyEqual places spaces around "=" sign even when PrettyFormat is false.
PrettyEqual = false
// DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled.
DefaultFormatLeft = ""
// DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled.
DefaultFormatRight = ""
)
func init() {
if runtime.GOOS == "windows" {
LineBreak = "\r\n"
}
}
// LoadOptions contains all customized options used for loading data source(s).
type LoadOptions struct {
// Loose indicates whether the parser should ignore nonexistent files or return an error.
Loose bool
// Insensitive indicates whether the parser forces all section and key names to lowercase.
Insensitive bool
// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
IgnoreContinuation bool
// IgnoreInlineComment indicates whether to ignore comments at the end of a value and treat them as part of the value.
IgnoreInlineComment bool
// SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
SkipUnrecognizableLines bool
// AllowBooleanKeys indicates whether to allow boolean-type keys or to treat them as keys with missing values.
// This type of key is mostly used in my.cnf.
AllowBooleanKeys bool
// AllowShadows indicates whether to keep track of keys with the same name under the same section.
AllowShadows bool
// AllowNestedValues indicates whether to allow AWS-like nested values.
// Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values
AllowNestedValues bool
// AllowPythonMultilineValues indicates whether to allow Python-like multi-line values.
// Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure
// Relevant quote: Values can also span multiple lines, as long as they are indented deeper
// than the first line of the value.
AllowPythonMultilineValues bool
// SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value.
// Docs: https://docs.python.org/2/library/configparser.html
// Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names.
// In the latter case, they need to be preceded by a whitespace character to be recognized as a comment.
SpaceBeforeInlineComment bool
// UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format
// when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value"
UnescapeValueDoubleQuotes bool
// UnescapeValueCommentSymbols indicates whether to unescape comment symbols (\# and \;) inside value to regular format
// when value is NOT surrounded by any quotes.
// Note: UNSTABLE; behavior might change to only unescape inside double quotes, or this may not be necessary at all.
UnescapeValueCommentSymbols bool
// UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise
// conform to key/value pairs. Specify the names of those blocks here.
UnparseableSections []string
// KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:".
KeyValueDelimiters string
// PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes).
PreserveSurroundedQuote bool
// DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values).
DebugFunc DebugFunc
// ReaderBufferSize is the buffer size of the reader in bytes.
ReaderBufferSize int
}
// DebugFunc is the type of function called to log parse events.
type DebugFunc func(message string)
// LoadSources allows caller to apply customized options for loading from data source(s).
func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
sources := make([]dataSource, len(others)+1)
sources[0], err = parseDataSource(source)
if err != nil {
return nil, err
}
for i := range others {
sources[i+1], err = parseDataSource(others[i])
if err != nil {
return nil, err
}
}
f := newFile(sources, opts)
if err = f.Reload(); err != nil {
return nil, err
}
return f, nil
}
// Load loads and parses from INI data sources.
// Arguments can be a mix of file names (string) and raw data in []byte.
// It will return an error if the list contains nonexistent files.
func Load(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{}, source, others...)
}
// LooseLoad has exactly the same functionality as the Load function,
// except that it ignores nonexistent files instead of returning an error.
func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{Loose: true}, source, others...)
}
// InsensitiveLoad has exactly the same functionality as the Load function,
// except that it forces all section and key names to be lowercased.
func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{Insensitive: true}, source, others...)
}
// ShadowLoad has exactly the same functionality as the Load function,
// except that it allows shadow keys.
func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
}

801
vendor/gopkg.in/ini.v1/key.go generated vendored Normal file
View File

@ -0,0 +1,801 @@
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"bytes"
"errors"
"fmt"
"strconv"
"strings"
"time"
)
// Key represents a key under a section.
type Key struct {
s *Section
Comment string
name string
value string
isAutoIncrement bool
isBooleanType bool
isShadow bool
shadows []*Key
nestedValues []string
}
// newKey simply returns a key object with the given values.
func newKey(s *Section, name, val string) *Key {
return &Key{
s: s,
name: name,
value: val,
}
}
func (k *Key) addShadow(val string) error {
if k.isShadow {
return errors.New("cannot add shadow to another shadow key")
} else if k.isAutoIncrement || k.isBooleanType {
return errors.New("cannot add shadow to auto-increment or boolean key")
}
// Deduplicate shadows based on their values.
if k.value == val {
return nil
}
for i := range k.shadows {
if k.shadows[i].value == val {
return nil
}
}
shadow := newKey(k.s, k.name, val)
shadow.isShadow = true
k.shadows = append(k.shadows, shadow)
return nil
}
// AddShadow adds a new shadow key to itself.
func (k *Key) AddShadow(val string) error {
if !k.s.f.options.AllowShadows {
return errors.New("shadow key is not allowed")
}
return k.addShadow(val)
}
func (k *Key) addNestedValue(val string) error {
if k.isAutoIncrement || k.isBooleanType {
return errors.New("cannot add nested value to auto-increment or boolean key")
}
k.nestedValues = append(k.nestedValues, val)
return nil
}
// AddNestedValue adds a nested value to the key.
func (k *Key) AddNestedValue(val string) error {
if !k.s.f.options.AllowNestedValues {
return errors.New("nested value is not allowed")
}
return k.addNestedValue(val)
}
// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
type ValueMapper func(string) string
// Name returns name of key.
func (k *Key) Name() string {
return k.name
}
// Value returns the raw value of the key for performance purposes.
func (k *Key) Value() string {
return k.value
}
// ValueWithShadows returns raw values of key and its shadows if any.
func (k *Key) ValueWithShadows() []string {
if len(k.shadows) == 0 {
return []string{k.value}
}
vals := make([]string, len(k.shadows)+1)
vals[0] = k.value
for i := range k.shadows {
vals[i+1] = k.shadows[i].value
}
return vals
}
// NestedValues returns nested values stored in the key.
// The returned value may be nil if no nested values are stored in the key.
func (k *Key) NestedValues() []string {
return k.nestedValues
}
// transformValue takes a raw value and transforms to its final string.
func (k *Key) transformValue(val string) string {
if k.s.f.ValueMapper != nil {
val = k.s.f.ValueMapper(val)
}
// Fail fast if no indicator character is found for a recursive value
if !strings.Contains(val, "%") {
return val
}
for i := 0; i < depthValues; i++ {
vr := varPattern.FindString(val)
if len(vr) == 0 {
break
}
// Take off leading '%(' and trailing ')s'.
noption := vr[2 : len(vr)-2]
// Search in the same section.
// If not found or found the key itself, then search again in default section.
nk, err := k.s.GetKey(noption)
if err != nil || k == nk {
nk, _ = k.s.f.Section("").GetKey(noption)
if nk == nil {
// Stop when no results found in the default section,
// and returns the value as-is.
break
}
}
// Substitute by new value and take off leading '%(' and trailing ')s'.
val = strings.Replace(val, vr, nk.value, -1)
}
return val
}
// String returns string representation of value.
func (k *Key) String() string {
return k.transformValue(k.value)
}
// Validate accepts a validation function which can
// return a modified result as the key value.
func (k *Key) Validate(fn func(string) string) string {
return fn(k.String())
}
// parseBool returns the boolean value represented by the string.
//
// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
// Any other value returns an error.
func parseBool(str string) (value bool, err error) {
switch str {
case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
return true, nil
case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
return false, nil
}
return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
}
// Bool returns bool type value.
func (k *Key) Bool() (bool, error) {
return parseBool(k.String())
}
// Float64 returns float64 type value.
func (k *Key) Float64() (float64, error) {
return strconv.ParseFloat(k.String(), 64)
}
// Int returns int type value.
func (k *Key) Int() (int, error) {
v, err := strconv.ParseInt(k.String(), 0, 64)
return int(v), err
}
// Int64 returns int64 type value.
func (k *Key) Int64() (int64, error) {
return strconv.ParseInt(k.String(), 0, 64)
}
// Uint returns uint type value.
func (k *Key) Uint() (uint, error) {
u, e := strconv.ParseUint(k.String(), 0, 64)
return uint(u), e
}
// Uint64 returns uint64 type value.
func (k *Key) Uint64() (uint64, error) {
return strconv.ParseUint(k.String(), 0, 64)
}
// Duration returns time.Duration type value.
func (k *Key) Duration() (time.Duration, error) {
return time.ParseDuration(k.String())
}
// TimeFormat parses with given format and returns time.Time type value.
func (k *Key) TimeFormat(format string) (time.Time, error) {
return time.Parse(format, k.String())
}
// Time parses with RFC3339 format and returns time.Time type value.
func (k *Key) Time() (time.Time, error) {
return k.TimeFormat(time.RFC3339)
}
// MustString returns default value if key value is empty.
func (k *Key) MustString(defaultVal string) string {
val := k.String()
if len(val) == 0 {
k.value = defaultVal
return defaultVal
}
return val
}
// MustBool always returns value without error,
// it returns false if error occurs.
func (k *Key) MustBool(defaultVal ...bool) bool {
val, err := k.Bool()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatBool(defaultVal[0])
return defaultVal[0]
}
return val
}
// MustFloat64 always returns value without error,
// it returns 0.0 if error occurs.
func (k *Key) MustFloat64(defaultVal ...float64) float64 {
val, err := k.Float64()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
return defaultVal[0]
}
return val
}
// MustInt always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustInt(defaultVal ...int) int {
val, err := k.Int()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
return defaultVal[0]
}
return val
}
// MustInt64 always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustInt64(defaultVal ...int64) int64 {
val, err := k.Int64()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatInt(defaultVal[0], 10)
return defaultVal[0]
}
return val
}
// MustUint always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustUint(defaultVal ...uint) uint {
val, err := k.Uint()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
return defaultVal[0]
}
return val
}
// MustUint64 always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
val, err := k.Uint64()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatUint(defaultVal[0], 10)
return defaultVal[0]
}
return val
}
// MustDuration always returns value without error,
// it returns zero value if error occurs.
func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
val, err := k.Duration()
if len(defaultVal) > 0 && err != nil {
k.value = defaultVal[0].String()
return defaultVal[0]
}
return val
}
// MustTimeFormat always parses with given format and returns value without error,
// it returns zero value if error occurs.
func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
val, err := k.TimeFormat(format)
if len(defaultVal) > 0 && err != nil {
k.value = defaultVal[0].Format(format)
return defaultVal[0]
}
return val
}
// MustTime always parses with RFC3339 format and returns value without error,
// it returns zero value if error occurs.
func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
return k.MustTimeFormat(time.RFC3339, defaultVal...)
}
// In always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) In(defaultVal string, candidates []string) string {
val := k.String()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InFloat64 always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
val := k.MustFloat64()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InInt always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InInt(defaultVal int, candidates []int) int {
val := k.MustInt()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InInt64 always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
val := k.MustInt64()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InUint always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
val := k.MustUint()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InUint64 always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
val := k.MustUint64()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InTimeFormat always parses with given format and returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
val := k.MustTimeFormat(format)
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InTime always parses with RFC3339 format and returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
}
// RangeFloat64 checks if value is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
val := k.MustFloat64()
if val < min || val > max {
return defaultVal
}
return val
}
// RangeInt checks if value is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeInt(defaultVal, min, max int) int {
val := k.MustInt()
if val < min || val > max {
return defaultVal
}
return val
}
// RangeInt64 checks if value is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
val := k.MustInt64()
if val < min || val > max {
return defaultVal
}
return val
}
// RangeTimeFormat checks if value with given format is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
val := k.MustTimeFormat(format)
if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
return defaultVal
}
return val
}
// RangeTime checks if value with RFC3339 format is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
}
// Strings returns list of string divided by given delimiter.
func (k *Key) Strings(delim string) []string {
str := k.String()
if len(str) == 0 {
return []string{}
}
runes := []rune(str)
vals := make([]string, 0, 2)
var buf bytes.Buffer
escape := false
idx := 0
for {
if escape {
escape = false
if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) {
buf.WriteRune('\\')
}
buf.WriteRune(runes[idx])
} else {
if runes[idx] == '\\' {
escape = true
} else if strings.HasPrefix(string(runes[idx:]), delim) {
idx += len(delim) - 1
vals = append(vals, strings.TrimSpace(buf.String()))
buf.Reset()
} else {
buf.WriteRune(runes[idx])
}
}
idx++
if idx == len(runes) {
break
}
}
if buf.Len() > 0 {
vals = append(vals, strings.TrimSpace(buf.String()))
}
return vals
}
// StringsWithShadows returns list of string divided by given delimiter.
// Shadows will also be appended if any.
func (k *Key) StringsWithShadows(delim string) []string {
vals := k.ValueWithShadows()
results := make([]string, 0, len(vals)*2)
for i := range vals {
if len(vals) == 0 {
continue
}
results = append(results, strings.Split(vals[i], delim)...)
}
for i := range results {
results[i] = k.transformValue(strings.TrimSpace(results[i]))
}
return results
}
// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Float64s(delim string) []float64 {
vals, _ := k.parseFloat64s(k.Strings(delim), true, false)
return vals
}
// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Ints(delim string) []int {
vals, _ := k.parseInts(k.Strings(delim), true, false)
return vals
}
// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Int64s(delim string) []int64 {
vals, _ := k.parseInt64s(k.Strings(delim), true, false)
return vals
}
// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Uints(delim string) []uint {
vals, _ := k.parseUints(k.Strings(delim), true, false)
return vals
}
// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Uint64s(delim string) []uint64 {
vals, _ := k.parseUint64s(k.Strings(delim), true, false)
return vals
}
// Bools returns list of bool divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Bools(delim string) []bool {
vals, _ := k.parseBools(k.Strings(delim), true, false)
return vals
}
// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
func (k *Key) TimesFormat(format, delim string) []time.Time {
vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false)
return vals
}
// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
func (k *Key) Times(delim string) []time.Time {
return k.TimesFormat(time.RFC3339, delim)
}
// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then
// it will not be included in the result list.
func (k *Key) ValidFloat64s(delim string) []float64 {
vals, _ := k.parseFloat64s(k.Strings(delim), false, false)
return vals
}
// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will
// not be included in the result list.
func (k *Key) ValidInts(delim string) []int {
vals, _ := k.parseInts(k.Strings(delim), false, false)
return vals
}
// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer,
// then it will not be included in the result list.
func (k *Key) ValidInt64s(delim string) []int64 {
vals, _ := k.parseInt64s(k.Strings(delim), false, false)
return vals
}
// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer,
// then it will not be included in the result list.
func (k *Key) ValidUints(delim string) []uint {
vals, _ := k.parseUints(k.Strings(delim), false, false)
return vals
}
// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned
// integer, then it will not be included in the result list.
func (k *Key) ValidUint64s(delim string) []uint64 {
vals, _ := k.parseUint64s(k.Strings(delim), false, false)
return vals
}
// ValidBools returns list of bool divided by given delimiter. If some value is not a boolean, then
// it will not be included in the result list.
func (k *Key) ValidBools(delim string) []bool {
vals, _ := k.parseBools(k.Strings(delim), false, false)
return vals
}
// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false)
return vals
}
// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
func (k *Key) ValidTimes(delim string) []time.Time {
return k.ValidTimesFormat(time.RFC3339, delim)
}
// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
return k.parseFloat64s(k.Strings(delim), false, true)
}
// StrictInts returns list of int divided by given delimiter or error on first invalid input.
func (k *Key) StrictInts(delim string) ([]int, error) {
return k.parseInts(k.Strings(delim), false, true)
}
// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictInt64s(delim string) ([]int64, error) {
return k.parseInt64s(k.Strings(delim), false, true)
}
// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
func (k *Key) StrictUints(delim string) ([]uint, error) {
return k.parseUints(k.Strings(delim), false, true)
}
// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
return k.parseUint64s(k.Strings(delim), false, true)
}
// StrictBools returns list of bool divided by given delimiter or error on first invalid input.
func (k *Key) StrictBools(delim string) ([]bool, error) {
return k.parseBools(k.Strings(delim), false, true)
}
// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
// or error on first invalid input.
func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
return k.parseTimesFormat(format, k.Strings(delim), false, true)
}
// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
// or error on first invalid input.
func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
return k.StrictTimesFormat(time.RFC3339, delim)
}
// parseBools transforms strings to bools.
func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) {
vals := make([]bool, 0, len(strs))
for _, str := range strs {
val, err := parseBool(str)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
}
}
return vals, nil
}
// parseFloat64s transforms strings to float64s.
func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) {
vals := make([]float64, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseFloat(str, 64)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
}
}
return vals, nil
}
// parseInts transforms strings to ints.
func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) {
vals := make([]int, 0, len(strs))
for _, str := range strs {
valInt64, err := strconv.ParseInt(str, 0, 64)
val := int(valInt64)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
}
}
return vals, nil
}
// parseInt64s transforms strings to int64s.
func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) {
vals := make([]int64, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseInt(str, 0, 64)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
}
}
return vals, nil
}
// parseUints transforms strings to uints.
func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) {
vals := make([]uint, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseUint(str, 0, 0)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, uint(val))
}
}
return vals, nil
}
// parseUint64s transforms strings to uint64s.
func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
vals := make([]uint64, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseUint(str, 0, 64)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
}
}
return vals, nil
}
// parseTimesFormat transforms strings to times in given format.
func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
vals := make([]time.Time, 0, len(strs))
for _, str := range strs {
val, err := time.Parse(format, str)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
}
}
return vals, nil
}
// SetValue changes key value.
func (k *Key) SetValue(v string) {
if k.s.f.BlockMode {
k.s.f.lock.Lock()
defer k.s.f.lock.Unlock()
}
k.value = v
k.s.keysHash[k.name] = v
}

526
vendor/gopkg.in/ini.v1/parser.go generated vendored Normal file
View File

@ -0,0 +1,526 @@
// Copyright 2015 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"bufio"
"bytes"
"fmt"
"io"
"regexp"
"strconv"
"strings"
"unicode"
)
const minReaderBufferSize = 4096
var pythonMultiline = regexp.MustCompile(`^([\t\f ]+)(.*)`)
type parserOptions struct {
IgnoreContinuation bool
IgnoreInlineComment bool
AllowPythonMultilineValues bool
SpaceBeforeInlineComment bool
UnescapeValueDoubleQuotes bool
UnescapeValueCommentSymbols bool
PreserveSurroundedQuote bool
DebugFunc DebugFunc
ReaderBufferSize int
}
type parser struct {
buf *bufio.Reader
options parserOptions
isEOF bool
count int
comment *bytes.Buffer
}
func (p *parser) debug(format string, args ...interface{}) {
if p.options.DebugFunc != nil {
p.options.DebugFunc(fmt.Sprintf(format, args...))
}
}
func newParser(r io.Reader, opts parserOptions) *parser {
size := opts.ReaderBufferSize
if size < minReaderBufferSize {
size = minReaderBufferSize
}
return &parser{
buf: bufio.NewReaderSize(r, size),
options: opts,
count: 1,
comment: &bytes.Buffer{},
}
}
// BOM handles the BOM header of UTF-8, UTF-16 LE and UTF-16 BE encodings.
// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
func (p *parser) BOM() error {
mask, err := p.buf.Peek(2)
if err != nil && err != io.EOF {
return err
} else if len(mask) < 2 {
return nil
}
switch {
case mask[0] == 254 && mask[1] == 255:
fallthrough
case mask[0] == 255 && mask[1] == 254:
p.buf.Read(mask)
case mask[0] == 239 && mask[1] == 187:
mask, err := p.buf.Peek(3)
if err != nil && err != io.EOF {
return err
} else if len(mask) < 3 {
return nil
}
if mask[2] == 191 {
p.buf.Read(mask)
}
}
return nil
}
func (p *parser) readUntil(delim byte) ([]byte, error) {
data, err := p.buf.ReadBytes(delim)
if err != nil {
if err == io.EOF {
p.isEOF = true
} else {
return nil, err
}
}
return data, nil
}
func cleanComment(in []byte) ([]byte, bool) {
i := bytes.IndexAny(in, "#;")
if i == -1 {
return nil, false
}
return in[i:], true
}
func readKeyName(delimiters string, in []byte) (string, int, error) {
line := string(in)
// Check if key name surrounded by quotes.
var keyQuote string
if line[0] == '"' {
if len(line) > 6 && string(line[0:3]) == `"""` {
keyQuote = `"""`
} else {
keyQuote = `"`
}
} else if line[0] == '`' {
keyQuote = "`"
}
// Get out key name
endIdx := -1
if len(keyQuote) > 0 {
startIdx := len(keyQuote)
// FIXME: fail case -> """"""name"""=value
pos := strings.Index(line[startIdx:], keyQuote)
if pos == -1 {
return "", -1, fmt.Errorf("missing closing key quote: %s", line)
}
pos += startIdx
// Find key-value delimiter
i := strings.IndexAny(line[pos+startIdx:], delimiters)
if i < 0 {
return "", -1, ErrDelimiterNotFound{line}
}
endIdx = pos + i
return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
}
endIdx = strings.IndexAny(line, delimiters)
if endIdx < 0 {
return "", -1, ErrDelimiterNotFound{line}
}
return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
}
func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
for {
data, err := p.readUntil('\n')
if err != nil {
return "", err
}
next := string(data)
pos := strings.LastIndex(next, valQuote)
if pos > -1 {
val += next[:pos]
comment, has := cleanComment([]byte(next[pos:]))
if has {
p.comment.Write(bytes.TrimSpace(comment))
}
break
}
val += next
if p.isEOF {
return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
}
}
return val, nil
}
func (p *parser) readContinuationLines(val string) (string, error) {
for {
data, err := p.readUntil('\n')
if err != nil {
return "", err
}
next := strings.TrimSpace(string(data))
if len(next) == 0 {
break
}
val += next
if val[len(val)-1] != '\\' {
break
}
val = val[:len(val)-1]
}
return val, nil
}
// hasSurroundedQuote checks if and only if the first and last characters
// are quotes \" or \'.
// It returns false if any other part also contains the same kind of quotes.
func hasSurroundedQuote(in string, quote byte) bool {
return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote &&
strings.IndexByte(in[1:], quote) == len(in)-2
}
func (p *parser) readValue(in []byte, bufferSize int) (string, error) {
line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
if len(line) == 0 {
if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' {
return p.readPythonMultilines(line, bufferSize)
}
return "", nil
}
var valQuote string
if len(line) > 3 && string(line[0:3]) == `"""` {
valQuote = `"""`
} else if line[0] == '`' {
valQuote = "`"
} else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' {
valQuote = `"`
}
if len(valQuote) > 0 {
startIdx := len(valQuote)
pos := strings.LastIndex(line[startIdx:], valQuote)
// Check for multi-line value
if pos == -1 {
return p.readMultilines(line, line[startIdx:], valQuote)
}
if p.options.UnescapeValueDoubleQuotes && valQuote == `"` {
return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil
}
return line[startIdx : pos+startIdx], nil
}
lastChar := line[len(line)-1]
// Won't be able to reach here if value only contains whitespace
line = strings.TrimSpace(line)
trimmedLastChar := line[len(line)-1]
// Check continuation lines when desired
if !p.options.IgnoreContinuation && trimmedLastChar == '\\' {
return p.readContinuationLines(line[:len(line)-1])
}
// Check if ignore inline comment
if !p.options.IgnoreInlineComment {
var i int
if p.options.SpaceBeforeInlineComment {
i = strings.Index(line, " #")
if i == -1 {
i = strings.Index(line, " ;")
}
} else {
i = strings.IndexAny(line, "#;")
}
if i > -1 {
p.comment.WriteString(line[i:])
line = strings.TrimSpace(line[:i])
}
}
// Trim single and double quotes
if (hasSurroundedQuote(line, '\'') ||
hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote {
line = line[1 : len(line)-1]
} else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols {
if strings.Contains(line, `\;`) {
line = strings.Replace(line, `\;`, ";", -1)
}
if strings.Contains(line, `\#`) {
line = strings.Replace(line, `\#`, "#", -1)
}
} else if p.options.AllowPythonMultilineValues && lastChar == '\n' {
return p.readPythonMultilines(line, bufferSize)
}
return line, nil
}
func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) {
parserBufferPeekResult, _ := p.buf.Peek(bufferSize)
peekBuffer := bytes.NewBuffer(parserBufferPeekResult)
indentSize := 0
for {
peekData, peekErr := peekBuffer.ReadBytes('\n')
if peekErr != nil {
if peekErr == io.EOF {
p.debug("readPythonMultilines: io.EOF, peekData: %q, line: %q", string(peekData), line)
return line, nil
}
p.debug("readPythonMultilines: failed to peek with error: %v", peekErr)
return "", peekErr
}
p.debug("readPythonMultilines: parsing %q", string(peekData))
peekMatches := pythonMultiline.FindStringSubmatch(string(peekData))
p.debug("readPythonMultilines: matched %d parts", len(peekMatches))
for n, v := range peekMatches {
p.debug(" %d: %q", n, v)
}
// Return if not a Python multiline value.
if len(peekMatches) != 3 {
p.debug("readPythonMultilines: end of value, got: %q", line)
return line, nil
}
// Determine indent size and line prefix.
currentIndentSize := len(peekMatches[1])
if indentSize < 1 {
indentSize = currentIndentSize
p.debug("readPythonMultilines: indent size is %d", indentSize)
}
// Make sure each line is indented at least as far as first line.
if currentIndentSize < indentSize {
p.debug("readPythonMultilines: end of value, current indent: %d, expected indent: %d, line: %q", currentIndentSize, indentSize, line)
return line, nil
}
// Advance the parser reader (buffer) in-sync with the peek buffer.
_, err := p.buf.Discard(len(peekData))
if err != nil {
p.debug("readPythonMultilines: failed to skip to the end, returning error")
return "", err
}
// Handle indented empty line.
line += "\n" + peekMatches[1][indentSize:] + peekMatches[2]
}
}
// parse parses data through an io.Reader.
func (f *File) parse(reader io.Reader) (err error) {
p := newParser(reader, parserOptions{
IgnoreContinuation: f.options.IgnoreContinuation,
IgnoreInlineComment: f.options.IgnoreInlineComment,
AllowPythonMultilineValues: f.options.AllowPythonMultilineValues,
SpaceBeforeInlineComment: f.options.SpaceBeforeInlineComment,
UnescapeValueDoubleQuotes: f.options.UnescapeValueDoubleQuotes,
UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols,
PreserveSurroundedQuote: f.options.PreserveSurroundedQuote,
DebugFunc: f.options.DebugFunc,
ReaderBufferSize: f.options.ReaderBufferSize,
})
if err = p.BOM(); err != nil {
return fmt.Errorf("BOM: %v", err)
}
// Ignore error because default section name is never empty string.
name := DefaultSection
if f.options.Insensitive {
name = strings.ToLower(DefaultSection)
}
section, _ := f.NewSection(name)
// This "last" is not strictly equivalent to "previous one" if current key is not the first nested key
var isLastValueEmpty bool
var lastRegularKey *Key
var line []byte
var inUnparseableSection bool
// NOTE: Iterate and increase `currentPeekSize` until
// the size of the parser buffer is found.
// TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`.
parserBufferSize := 0
// NOTE: Peek 4kb at a time.
currentPeekSize := minReaderBufferSize
if f.options.AllowPythonMultilineValues {
for {
peekBytes, _ := p.buf.Peek(currentPeekSize)
peekBytesLength := len(peekBytes)
if parserBufferSize >= peekBytesLength {
break
}
currentPeekSize *= 2
parserBufferSize = peekBytesLength
}
}
for !p.isEOF {
line, err = p.readUntil('\n')
if err != nil {
return err
}
if f.options.AllowNestedValues &&
isLastValueEmpty && len(line) > 0 {
if line[0] == ' ' || line[0] == '\t' {
lastRegularKey.addNestedValue(string(bytes.TrimSpace(line)))
continue
}
}
line = bytes.TrimLeftFunc(line, unicode.IsSpace)
if len(line) == 0 {
continue
}
// Comments
if line[0] == '#' || line[0] == ';' {
// Note: we deliberately keep the trailing line break here;
// it is needed when a second comment line is appended,
// and the accumulated comment is trimmed once when it is assigned to a value.
p.comment.Write(line)
continue
}
// Section
if line[0] == '[' {
// Read to the next ']' (TODO: support quoted strings)
closeIdx := bytes.LastIndexByte(line, ']')
if closeIdx == -1 {
return fmt.Errorf("unclosed section: %s", line)
}
name := string(line[1:closeIdx])
section, err = f.NewSection(name)
if err != nil {
return err
}
comment, has := cleanComment(line[closeIdx+1:])
if has {
p.comment.Write(comment)
}
section.Comment = strings.TrimSpace(p.comment.String())
// Reset auto-counter and comments
p.comment.Reset()
p.count = 1
inUnparseableSection = false
for i := range f.options.UnparseableSections {
if f.options.UnparseableSections[i] == name ||
(f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) {
inUnparseableSection = true
continue
}
}
continue
}
if inUnparseableSection {
section.isRawSection = true
section.rawBody += string(line)
continue
}
kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line)
if err != nil {
// Treat as boolean key when desired, and whole line is key name.
if IsErrDelimiterNotFound(err) {
switch {
case f.options.AllowBooleanKeys:
kname, err := p.readValue(line, parserBufferSize)
if err != nil {
return err
}
key, err := section.NewBooleanKey(kname)
if err != nil {
return err
}
key.Comment = strings.TrimSpace(p.comment.String())
p.comment.Reset()
continue
case f.options.SkipUnrecognizableLines:
continue
}
}
return err
}
// Auto increment.
isAutoIncr := false
if kname == "-" {
isAutoIncr = true
kname = "#" + strconv.Itoa(p.count)
p.count++
}
value, err := p.readValue(line[offset:], parserBufferSize)
if err != nil {
return err
}
isLastValueEmpty = len(value) == 0
key, err := section.NewKey(kname, value)
if err != nil {
return err
}
key.isAutoIncrement = isAutoIncr
key.Comment = strings.TrimSpace(p.comment.String())
p.comment.Reset()
lastRegularKey = key
}
return nil
}

256
vendor/gopkg.in/ini.v1/section.go generated vendored Normal file
View File

@ -0,0 +1,256 @@
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"errors"
"fmt"
"strings"
)
// Section represents a config section.
type Section struct {
f *File
Comment string
name string
keys map[string]*Key
keyList []string
keysHash map[string]string
isRawSection bool
rawBody string
}
func newSection(f *File, name string) *Section {
return &Section{
f: f,
name: name,
keys: make(map[string]*Key),
keyList: make([]string, 0, 10),
keysHash: make(map[string]string),
}
}
// Name returns name of Section.
func (s *Section) Name() string {
return s.name
}
// Body returns rawBody of Section if the section was marked as unparseable.
// It still follows the other rules of the INI format surrounding leading/trailing whitespace.
func (s *Section) Body() string {
return strings.TrimSpace(s.rawBody)
}
// SetBody updates body content only if section is raw.
func (s *Section) SetBody(body string) {
if !s.isRawSection {
return
}
s.rawBody = body
}
// NewKey creates a new key to given section.
func (s *Section) NewKey(name, val string) (*Key, error) {
if len(name) == 0 {
return nil, errors.New("error creating new key: empty key name")
} else if s.f.options.Insensitive {
name = strings.ToLower(name)
}
if s.f.BlockMode {
s.f.lock.Lock()
defer s.f.lock.Unlock()
}
if inSlice(name, s.keyList) {
if s.f.options.AllowShadows {
if err := s.keys[name].addShadow(val); err != nil {
return nil, err
}
} else {
s.keys[name].value = val
s.keysHash[name] = val
}
return s.keys[name], nil
}
s.keyList = append(s.keyList, name)
s.keys[name] = newKey(s, name, val)
s.keysHash[name] = val
return s.keys[name], nil
}
// NewBooleanKey creates a new boolean type key to given section.
func (s *Section) NewBooleanKey(name string) (*Key, error) {
key, err := s.NewKey(name, "true")
if err != nil {
return nil, err
}
key.isBooleanType = true
return key, nil
}
// GetKey returns key in section by given name.
func (s *Section) GetKey(name string) (*Key, error) {
if s.f.BlockMode {
s.f.lock.RLock()
}
if s.f.options.Insensitive {
name = strings.ToLower(name)
}
key := s.keys[name]
if s.f.BlockMode {
s.f.lock.RUnlock()
}
if key == nil {
// Check if it is a child-section.
sname := s.name
for {
if i := strings.LastIndex(sname, "."); i > -1 {
sname = sname[:i]
sec, err := s.f.GetSection(sname)
if err != nil {
continue
}
return sec.GetKey(name)
}
break
}
return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name)
}
return key, nil
}
// HasKey returns true if section contains a key with given name.
func (s *Section) HasKey(name string) bool {
key, _ := s.GetKey(name)
return key != nil
}
// Deprecated: Use "HasKey" instead.
func (s *Section) Haskey(name string) bool {
return s.HasKey(name)
}
// HasValue returns true if section contains given raw value.
func (s *Section) HasValue(value string) bool {
if s.f.BlockMode {
s.f.lock.RLock()
defer s.f.lock.RUnlock()
}
for _, k := range s.keys {
if value == k.value {
return true
}
}
return false
}
// Key assumes named Key exists in section and returns a zero-value when not.
func (s *Section) Key(name string) *Key {
key, err := s.GetKey(name)
if err != nil {
// It's OK here because the only possible error is empty key name,
// but if it's empty, this piece of code won't be executed.
key, _ = s.NewKey(name, "")
return key
}
return key
}
// Keys returns list of keys of section.
func (s *Section) Keys() []*Key {
keys := make([]*Key, len(s.keyList))
for i := range s.keyList {
keys[i] = s.Key(s.keyList[i])
}
return keys
}
// ParentKeys returns list of keys of parent section.
func (s *Section) ParentKeys() []*Key {
var parentKeys []*Key
sname := s.name
for {
if i := strings.LastIndex(sname, "."); i > -1 {
sname = sname[:i]
sec, err := s.f.GetSection(sname)
if err != nil {
continue
}
parentKeys = append(parentKeys, sec.Keys()...)
} else {
break
}
}
return parentKeys
}
// KeyStrings returns list of key names of section.
func (s *Section) KeyStrings() []string {
list := make([]string, len(s.keyList))
copy(list, s.keyList)
return list
}
// KeysHash returns keys hash consisting of names and values.
func (s *Section) KeysHash() map[string]string {
if s.f.BlockMode {
s.f.lock.RLock()
defer s.f.lock.RUnlock()
}
hash := map[string]string{}
for key, value := range s.keysHash {
hash[key] = value
}
return hash
}
// DeleteKey deletes a key from section.
func (s *Section) DeleteKey(name string) {
if s.f.BlockMode {
s.f.lock.Lock()
defer s.f.lock.Unlock()
}
for i, k := range s.keyList {
if k == name {
s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
delete(s.keys, name)
delete(s.keysHash, name)
return
}
}
}
// ChildSections returns a list of child sections of current section.
// For example, "[parent.child1]" and "[parent.child12]" are child sections
// of section "[parent]".
func (s *Section) ChildSections() []*Section {
prefix := s.name + "."
children := make([]*Section, 0, 3)
for _, name := range s.f.sectionList {
if strings.HasPrefix(name, prefix) {
children = append(children, s.f.sections[name])
}
}
return children
}

615
vendor/gopkg.in/ini.v1/struct.go generated vendored Normal file
View File

@ -0,0 +1,615 @@
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"bytes"
"errors"
"fmt"
"reflect"
"strings"
"time"
"unicode"
)
// NameMapper represents a ini tag name mapper.
type NameMapper func(string) string
// Built-in name getters.
var (
// SnackCase converts to format SNACK_CASE.
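// For example (illustrative): SnackCase("MyFieldName") returns "MY_FIELD_NAME".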
SnackCase NameMapper = func(raw string) string {
newstr := make([]rune, 0, len(raw))
for i, chr := range raw {
if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
if i > 0 {
newstr = append(newstr, '_')
}
}
newstr = append(newstr, unicode.ToUpper(chr))
}
return string(newstr)
}
// TitleUnderscore converts to format title_underscore.
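// For example (illustrative): TitleUnderscore("MyFieldName") returns "my_field_name".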
TitleUnderscore NameMapper = func(raw string) string {
newstr := make([]rune, 0, len(raw))
for i, chr := range raw {
if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
if i > 0 {
newstr = append(newstr, '_')
}
chr -= 'A' - 'a'
}
newstr = append(newstr, chr)
}
return string(newstr)
}
)
func (s *Section) parseFieldName(raw, actual string) string {
if len(actual) > 0 {
return actual
}
if s.f.NameMapper != nil {
return s.f.NameMapper(raw)
}
return raw
}
func parseDelim(actual string) string {
if len(actual) > 0 {
return actual
}
return ","
}
var reflectTime = reflect.TypeOf(time.Now()).Kind()
// setSliceWithProperType sets proper values to slice based on its type.
func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
var strs []string
if allowShadow {
strs = key.StringsWithShadows(delim)
} else {
strs = key.Strings(delim)
}
numVals := len(strs)
if numVals == 0 {
return nil
}
var vals interface{}
var err error
sliceOf := field.Type().Elem().Kind()
switch sliceOf {
case reflect.String:
vals = strs
case reflect.Int:
vals, err = key.parseInts(strs, true, false)
case reflect.Int64:
vals, err = key.parseInt64s(strs, true, false)
case reflect.Uint:
vals, err = key.parseUints(strs, true, false)
case reflect.Uint64:
vals, err = key.parseUint64s(strs, true, false)
case reflect.Float64:
vals, err = key.parseFloat64s(strs, true, false)
case reflect.Bool:
vals, err = key.parseBools(strs, true, false)
case reflectTime:
vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false)
default:
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
}
if err != nil && isStrict {
return err
}
slice := reflect.MakeSlice(field.Type(), numVals, numVals)
for i := 0; i < numVals; i++ {
switch sliceOf {
case reflect.String:
slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
case reflect.Int:
slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
case reflect.Int64:
slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
case reflect.Uint:
slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
case reflect.Uint64:
slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
case reflect.Float64:
slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
case reflect.Bool:
slice.Index(i).Set(reflect.ValueOf(vals.([]bool)[i]))
case reflectTime:
slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
}
}
field.Set(slice)
return nil
}
func wrapStrictError(err error, isStrict bool) error {
if isStrict {
return err
}
return nil
}
// setWithProperType sets proper value to field based on its type,
// but it does not return error for failing parsing,
// because we want to use default value that is already assigned to struct.
func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
vt := t
isPtr := t.Kind() == reflect.Ptr
if isPtr {
vt = t.Elem()
}
switch vt.Kind() {
case reflect.String:
stringVal := key.String()
if isPtr {
field.Set(reflect.ValueOf(&stringVal))
} else if len(stringVal) > 0 {
field.SetString(key.String())
}
case reflect.Bool:
boolVal, err := key.Bool()
if err != nil {
return wrapStrictError(err, isStrict)
}
if isPtr {
field.Set(reflect.ValueOf(&boolVal))
} else {
field.SetBool(boolVal)
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
// ParseDuration will not return err for `0`, so check the type name
if vt.Name() == "Duration" {
durationVal, err := key.Duration()
if err != nil {
if intVal, err := key.Int64(); err == nil {
field.SetInt(intVal)
return nil
}
return wrapStrictError(err, isStrict)
}
if isPtr {
field.Set(reflect.ValueOf(&durationVal))
} else if int64(durationVal) > 0 {
field.Set(reflect.ValueOf(durationVal))
}
return nil
}
intVal, err := key.Int64()
if err != nil {
return wrapStrictError(err, isStrict)
}
if isPtr {
pv := reflect.New(t.Elem())
pv.Elem().SetInt(intVal)
field.Set(pv)
} else {
field.SetInt(intVal)
}
// byte is an alias for uint8, so supporting uint8 breaks support for byte
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
durationVal, err := key.Duration()
// Skip zero value
if err == nil && uint64(durationVal) > 0 {
if isPtr {
field.Set(reflect.ValueOf(&durationVal))
} else {
field.Set(reflect.ValueOf(durationVal))
}
return nil
}
uintVal, err := key.Uint64()
if err != nil {
return wrapStrictError(err, isStrict)
}
if isPtr {
pv := reflect.New(t.Elem())
pv.Elem().SetUint(uintVal)
field.Set(pv)
} else {
field.SetUint(uintVal)
}
case reflect.Float32, reflect.Float64:
floatVal, err := key.Float64()
if err != nil {
return wrapStrictError(err, isStrict)
}
if isPtr {
pv := reflect.New(t.Elem())
pv.Elem().SetFloat(floatVal)
field.Set(pv)
} else {
field.SetFloat(floatVal)
}
case reflectTime:
timeVal, err := key.Time()
if err != nil {
return wrapStrictError(err, isStrict)
}
if isPtr {
field.Set(reflect.ValueOf(&timeVal))
} else {
field.Set(reflect.ValueOf(timeVal))
}
case reflect.Slice:
return setSliceWithProperType(key, field, delim, allowShadow, isStrict)
default:
return fmt.Errorf("unsupported type '%s'", t)
}
return nil
}
func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) {
opts := strings.SplitN(tag, ",", 3)
rawName = opts[0]
if len(opts) > 1 {
omitEmpty = opts[1] == "omitempty"
}
if len(opts) > 2 {
allowShadow = opts[2] == "allowshadow"
}
return rawName, omitEmpty, allowShadow
}
func (s *Section) mapTo(val reflect.Value, isStrict bool) error {
if val.Kind() == reflect.Ptr {
val = val.Elem()
}
typ := val.Type()
for i := 0; i < typ.NumField(); i++ {
field := val.Field(i)
tpField := typ.Field(i)
tag := tpField.Tag.Get("ini")
if tag == "-" {
continue
}
rawName, _, allowShadow := parseTagOptions(tag)
fieldName := s.parseFieldName(tpField.Name, rawName)
if len(fieldName) == 0 || !field.CanSet() {
continue
}
isStruct := tpField.Type.Kind() == reflect.Struct
isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct
isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
if isAnonymous {
field.Set(reflect.New(tpField.Type.Elem()))
}
if isAnonymous || isStruct || isStructPtr {
if sec, err := s.f.GetSection(fieldName); err == nil {
// Only set the field to non-nil struct value if we have a section for it.
// Otherwise, we end up with a non-nil struct ptr even though there is no data.
if isStructPtr && field.IsNil() {
field.Set(reflect.New(tpField.Type.Elem()))
}
if err = sec.mapTo(field, isStrict); err != nil {
return fmt.Errorf("error mapping field %q: %v", fieldName, err)
}
continue
}
}
if key, err := s.GetKey(fieldName); err == nil {
delim := parseDelim(tpField.Tag.Get("delim"))
if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil {
return fmt.Errorf("error mapping field %q: %v", fieldName, err)
}
}
}
return nil
}
// MapTo maps section to given struct.
func (s *Section) MapTo(v interface{}) error {
typ := reflect.TypeOf(v)
val := reflect.ValueOf(v)
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
val = val.Elem()
} else {
return errors.New("cannot map to non-pointer struct")
}
return s.mapTo(val, false)
}
// StrictMapTo maps section to given struct in strict mode,
// which returns all possible error including value parsing error.
func (s *Section) StrictMapTo(v interface{}) error {
typ := reflect.TypeOf(v)
val := reflect.ValueOf(v)
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
val = val.Elem()
} else {
return errors.New("cannot map to non-pointer struct")
}
return s.mapTo(val, true)
}
// MapTo maps file to given struct.
func (f *File) MapTo(v interface{}) error {
return f.Section("").MapTo(v)
}
// StrictMapTo maps file to given struct in strict mode,
// which returns all possible error including value parsing error.
func (f *File) StrictMapTo(v interface{}) error {
return f.Section("").StrictMapTo(v)
}
// MapToWithMapper maps data sources to given struct with name mapper.
func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
cfg, err := Load(source, others...)
if err != nil {
return err
}
cfg.NameMapper = mapper
return cfg.MapTo(v)
}
// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode,
// which returns all possible error including value parsing error.
func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
cfg, err := Load(source, others...)
if err != nil {
return err
}
cfg.NameMapper = mapper
return cfg.StrictMapTo(v)
}
// MapTo maps data sources to given struct.
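// Illustrative usage (comment added for clarity; the struct fields and file name
// are placeholders):
//
//	type Config struct {
//		Name string `ini:"name"`
//		Port int    `ini:"port"`
//	}
//	var c Config
//	err := MapTo(&c, "my.ini")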
func MapTo(v, source interface{}, others ...interface{}) error {
return MapToWithMapper(v, nil, source, others...)
}
// StrictMapTo maps data sources to given struct in strict mode,
// which returns all possible error including value parsing error.
func StrictMapTo(v, source interface{}, others ...interface{}) error {
return StrictMapToWithMapper(v, nil, source, others...)
}
// reflectSliceWithProperType does the opposite of setSliceWithProperType.
func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error {
slice := field.Slice(0, field.Len())
if field.Len() == 0 {
return nil
}
sliceOf := field.Type().Elem().Kind()
if allowShadow {
var keyWithShadows *Key
for i := 0; i < field.Len(); i++ {
var val string
switch sliceOf {
case reflect.String:
val = slice.Index(i).String()
case reflect.Int, reflect.Int64:
val = fmt.Sprint(slice.Index(i).Int())
case reflect.Uint, reflect.Uint64:
val = fmt.Sprint(slice.Index(i).Uint())
case reflect.Float64:
val = fmt.Sprint(slice.Index(i).Float())
case reflect.Bool:
val = fmt.Sprint(slice.Index(i).Bool())
case reflectTime:
val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339)
default:
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
}
if i == 0 {
keyWithShadows = newKey(key.s, key.name, val)
} else {
keyWithShadows.AddShadow(val)
}
}
key = keyWithShadows
return nil
}
var buf bytes.Buffer
for i := 0; i < field.Len(); i++ {
switch sliceOf {
case reflect.String:
buf.WriteString(slice.Index(i).String())
case reflect.Int, reflect.Int64:
buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
case reflect.Uint, reflect.Uint64:
buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
case reflect.Float64:
buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
case reflect.Bool:
buf.WriteString(fmt.Sprint(slice.Index(i).Bool()))
case reflectTime:
buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
default:
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
}
buf.WriteString(delim)
}
key.SetValue(buf.String()[:buf.Len()-len(delim)])
return nil
}
// reflectWithProperType does the opposite of setWithProperType.
func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error {
switch t.Kind() {
case reflect.String:
key.SetValue(field.String())
case reflect.Bool:
key.SetValue(fmt.Sprint(field.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
key.SetValue(fmt.Sprint(field.Int()))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
key.SetValue(fmt.Sprint(field.Uint()))
case reflect.Float32, reflect.Float64:
key.SetValue(fmt.Sprint(field.Float()))
case reflectTime:
key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
case reflect.Slice:
return reflectSliceWithProperType(key, field, delim, allowShadow)
case reflect.Ptr:
if !field.IsNil() {
return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow)
}
default:
return fmt.Errorf("unsupported type '%s'", t)
}
return nil
}
// CR: copied from encoding/json/encode.go with modifications of time.Time support.
// TODO: add more test coverage.
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
case reflectTime:
t, ok := v.Interface().(time.Time)
return ok && t.IsZero()
}
return false
}
// StructReflector is the interface implemented by struct types that can extract themselves into INI objects.
type StructReflector interface {
ReflectINIStruct(*File) error
}
func (s *Section) reflectFrom(val reflect.Value) error {
if val.Kind() == reflect.Ptr {
val = val.Elem()
}
typ := val.Type()
for i := 0; i < typ.NumField(); i++ {
field := val.Field(i)
tpField := typ.Field(i)
tag := tpField.Tag.Get("ini")
if tag == "-" {
continue
}
rawName, omitEmpty, allowShadow := parseTagOptions(tag)
if omitEmpty && isEmptyValue(field) {
continue
}
if r, ok := field.Interface().(StructReflector); ok {
return r.ReflectINIStruct(s.f)
}
fieldName := s.parseFieldName(tpField.Name, rawName)
if len(fieldName) == 0 || !field.CanSet() {
continue
}
if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
(tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
// Note: The only error here is section doesn't exist.
sec, err := s.f.GetSection(fieldName)
if err != nil {
// Note: fieldName can never be empty here, ignore error.
sec, _ = s.f.NewSection(fieldName)
}
// Add comment from comment tag
if len(sec.Comment) == 0 {
sec.Comment = tpField.Tag.Get("comment")
}
if err = sec.reflectFrom(field); err != nil {
return fmt.Errorf("error reflecting field %q: %v", fieldName, err)
}
continue
}
key, err := s.GetKey(fieldName)
if err != nil {
key, _ = s.NewKey(fieldName, "")
}
// Add comment from comment tag
if len(key.Comment) == 0 {
key.Comment = tpField.Tag.Get("comment")
}
delim := parseDelim(tpField.Tag.Get("delim"))
if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil {
return fmt.Errorf("error reflecting field %q: %v", fieldName, err)
}
}
return nil
}
// ReflectFrom reflects section from given struct.
func (s *Section) ReflectFrom(v interface{}) error {
typ := reflect.TypeOf(v)
val := reflect.ValueOf(v)
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
val = val.Elem()
} else {
return errors.New("cannot reflect from non-pointer struct")
}
return s.reflectFrom(val)
}
// ReflectFrom reflects file from given struct.
func (f *File) ReflectFrom(v interface{}) error {
return f.Section("").ReflectFrom(v)
}
// ReflectFromWithMapper reflects data sources from given struct with name mapper.
func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
cfg.NameMapper = mapper
return cfg.ReflectFrom(v)
}
// ReflectFrom reflects data sources from given struct.
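// Illustrative usage (comment added for clarity; Empty and SaveTo are assumed to
// be the usual helpers from this package, and the names are placeholders):
//
//	cfg := Empty()
//	if err := ReflectFrom(cfg, &c); err == nil {
//		err = cfg.SaveTo("my.ini")
//	}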
func ReflectFrom(cfg *File, v interface{}) error {
return ReflectFromWithMapper(cfg, v, nil)
}

0
vendor/gopkg.in/ldap.v3/.gitignore generated vendored Normal file
View File

32
vendor/gopkg.in/ldap.v3/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,32 @@
sudo: false
language: go
go:
- "1.5.x"
- "1.6.x"
- "1.7.x"
- "1.8.x"
- "1.9.x"
- "1.10.x"
- "1.11.x"
- "1.12.x"
- "1.13.x"
- tip
git:
depth: 1
matrix:
fast_finish: true
allow_failures:
- go: tip
go_import_path: gopkg.in/ldap.v3
install:
- go get gopkg.in/asn1-ber.v1
- go get code.google.com/p/go.tools/cmd/cover || go get golang.org/x/tools/cmd/cover
- go get github.com/golang/lint/golint || go get golang.org/x/lint/golint || true
- go build -v ./...
script:
- make test
- make fmt
- make vet
- make lint

12
vendor/gopkg.in/ldap.v3/CONTRIBUTING.md generated vendored Normal file
View File

@ -0,0 +1,12 @@
# Contribution Guidelines
We welcome contribution and improvements.
## Guiding Principles
To begin with here is a draft from an email exchange:
* take compatibility seriously (our semvers, compatibility with older go versions, etc)
* don't tag untested code for release
* beware of baking in implicit behavior based on other libraries/tools choices
* be as high-fidelity as possible in plumbing through LDAP data (don't mask errors or reduce power of someone using the library)

22
vendor/gopkg.in/ldap.v3/LICENSE generated vendored Normal file
View File

@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com)
Portions copyright (c) 2015-2016 go-ldap Authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

82
vendor/gopkg.in/ldap.v3/Makefile generated vendored Normal file
View File

@ -0,0 +1,82 @@
.PHONY: default install build test quicktest fmt vet lint
# List of all release tags "supported" by our current Go version
# E.g. ":go1.1:go1.2:go1.3:go1.4:go1.5:go1.6:go1.7:go1.8:go1.9:go1.10:go1.11:go1.12:"
GO_RELEASE_TAGS := $(shell go list -f ':{{join (context.ReleaseTags) ":"}}:' runtime)
# Only use the `-race` flag on newer versions of Go (version 1.3 and newer)
ifeq (,$(findstring :go1.3:,$(GO_RELEASE_TAGS)))
RACE_FLAG :=
else
RACE_FLAG := -race -cpu 1,2,4
endif
# Run `go vet` on Go 1.12 and newer. For Go 1.5-1.11, use `go tool vet`
ifneq (,$(findstring :go1.12:,$(GO_RELEASE_TAGS)))
GO_VET := go vet \
-atomic \
-bool \
-copylocks \
-nilfunc \
-printf \
-rangeloops \
-unreachable \
-unsafeptr \
-unusedresult \
.
else ifneq (,$(findstring :go1.5:,$(GO_RELEASE_TAGS)))
GO_VET := go tool vet \
-atomic \
-bool \
-copylocks \
-nilfunc \
-printf \
-shadow \
-rangeloops \
-unreachable \
-unsafeptr \
-unusedresult \
.
else
GO_VET := @echo "go vet skipped -- not supported on this version of Go"
endif
default: fmt vet lint build quicktest
install:
go get -t -v ./...
build:
go build -v ./...
test:
go test -v $(RACE_FLAG) -cover ./...
quicktest:
go test ./...
# Capture output and force failure when there is non-empty output
fmt:
@echo gofmt -l .
@OUTPUT=`gofmt -l . 2>&1`; \
if [ "$$OUTPUT" ]; then \
echo "gofmt must be run on the following files:"; \
echo "$$OUTPUT"; \
exit 1; \
fi
vet:
$(GO_VET)
# https://github.com/golang/lint
# go get github.com/golang/lint/golint
# Capture output and force failure when there is non-empty output
# Only run on go1.5+
lint:
@echo golint ./...
@OUTPUT=`command -v golint >/dev/null 2>&1 && golint ./... 2>&1`; \
if [ "$$OUTPUT" ]; then \
echo "golint errors:"; \
echo "$$OUTPUT"; \
exit 1; \
fi

54
vendor/gopkg.in/ldap.v3/README.md generated vendored Normal file
View File

@ -0,0 +1,54 @@
[![GoDoc](https://godoc.org/gopkg.in/ldap.v3?status.svg)](https://godoc.org/gopkg.in/ldap.v3)
[![Build Status](https://travis-ci.org/go-ldap/ldap.svg)](https://travis-ci.org/go-ldap/ldap)
# Basic LDAP v3 functionality for the Go programming language.
## Install
For the latest version use:
go get gopkg.in/ldap.v3
Import the latest version with:
import "gopkg.in/ldap.v3"
## Required Libraries:
- gopkg.in/asn1-ber.v1
## Features:
- Connecting to LDAP server (non-TLS, TLS, STARTTLS)
- Binding to LDAP server
- Searching for entries
- Filter Compile / Decompile
- Paging Search Results
- Modify Requests / Responses
- Add Requests / Responses
- Delete Requests / Responses
- Modify DN Requests / Responses
## Examples:
- search
- modify
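A minimal connect-and-bind sketch (illustrative only; the host, bind DN, and password are placeholders):
```go
package main

import (
	"log"

	"gopkg.in/ldap.v3"
)

func main() {
	// DialURL picks the default port when none is given: 389 for ldap://, 636 for ldaps://.
	l, err := ldap.DialURL("ldap://ldap.example.com")
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	// Simple bind; an empty password is rejected unless UnauthenticatedBind is used instead.
	if err := l.Bind("cn=admin,dc=example,dc=com", "secret"); err != nil {
		log.Fatal(err)
	}
}
```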
## Contributing:
Bug reports and pull requests are welcome!
Before submitting a pull request, please make sure tests and verification scripts pass:
```
make all
```
To set up a pre-push hook to run the tests and verify scripts before pushing:
```
ln -s ../../.githooks/pre-push .git/hooks/pre-push
```
---
The Go gopher was designed by Renee French. (http://reneefrench.blogspot.com/)
The design is licensed under the Creative Commons 3.0 Attributions license.
Read this article for more details: http://blog.golang.org/gopher

100
vendor/gopkg.in/ldap.v3/add.go generated vendored Normal file
View File

@ -0,0 +1,100 @@
//
// https://tools.ietf.org/html/rfc4511
//
// AddRequest ::= [APPLICATION 8] SEQUENCE {
// entry LDAPDN,
// attributes AttributeList }
//
// AttributeList ::= SEQUENCE OF attribute Attribute
package ldap
import (
"log"
ber "gopkg.in/asn1-ber.v1"
)
// Attribute represents an LDAP attribute
type Attribute struct {
// Type is the name of the LDAP attribute
Type string
// Vals are the LDAP attribute values
Vals []string
}
func (a *Attribute) encode() *ber.Packet {
seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attribute")
seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.Type, "Type"))
set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue")
for _, value := range a.Vals {
set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals"))
}
seq.AppendChild(set)
return seq
}
// AddRequest represents an LDAP AddRequest operation
type AddRequest struct {
// DN identifies the entry being added
DN string
// Attributes list the attributes of the new entry
Attributes []Attribute
// Controls hold optional controls to send with the request
Controls []Control
}
func (req *AddRequest) appendTo(envelope *ber.Packet) error {
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationAddRequest, nil, "Add Request")
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN"))
attributes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes")
for _, attribute := range req.Attributes {
attributes.AppendChild(attribute.encode())
}
pkt.AppendChild(attributes)
envelope.AppendChild(pkt)
if len(req.Controls) > 0 {
envelope.AppendChild(encodeControls(req.Controls))
}
return nil
}
// Attribute adds an attribute with the given type and values
func (req *AddRequest) Attribute(attrType string, attrVals []string) {
req.Attributes = append(req.Attributes, Attribute{Type: attrType, Vals: attrVals})
}
// NewAddRequest returns an AddRequest for the given DN, with no attributes
func NewAddRequest(dn string, controls []Control) *AddRequest {
return &AddRequest{
DN: dn,
Controls: controls,
}
}
// Add performs the given AddRequest
func (l *Conn) Add(addRequest *AddRequest) error {
msgCtx, err := l.doRequest(addRequest)
if err != nil {
return err
}
defer l.finishMessage(msgCtx)
packet, err := l.readPacket(msgCtx)
if err != nil {
return err
}
if packet.Children[1].Tag == ApplicationAddResponse {
err := GetLDAPError(packet)
if err != nil {
return err
}
} else {
log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
}
return nil
}

152
vendor/gopkg.in/ldap.v3/bind.go generated vendored Normal file
View File

@ -0,0 +1,152 @@
package ldap
import (
"errors"
"fmt"
ber "gopkg.in/asn1-ber.v1"
)
// SimpleBindRequest represents a username/password bind operation
type SimpleBindRequest struct {
// Username is the name of the Directory object that the client wishes to bind as
Username string
// Password is the credentials to bind with
Password string
// Controls are optional controls to send with the bind request
Controls []Control
// AllowEmptyPassword sets whether the client allows binding with an empty password
// (normally used for unauthenticated bind).
AllowEmptyPassword bool
}
// SimpleBindResult contains the response from the server
type SimpleBindResult struct {
Controls []Control
}
// NewSimpleBindRequest returns a bind request
func NewSimpleBindRequest(username string, password string, controls []Control) *SimpleBindRequest {
return &SimpleBindRequest{
Username: username,
Password: password,
Controls: controls,
AllowEmptyPassword: false,
}
}
func (req *SimpleBindRequest) appendTo(envelope *ber.Packet) error {
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.Username, "User Name"))
pkt.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, req.Password, "Password"))
envelope.AppendChild(pkt)
if len(req.Controls) > 0 {
envelope.AppendChild(encodeControls(req.Controls))
}
return nil
}
// SimpleBind performs the simple bind operation defined in the given request
func (l *Conn) SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error) {
if simpleBindRequest.Password == "" && !simpleBindRequest.AllowEmptyPassword {
return nil, NewError(ErrorEmptyPassword, errors.New("ldap: empty password not allowed by the client"))
}
msgCtx, err := l.doRequest(simpleBindRequest)
if err != nil {
return nil, err
}
defer l.finishMessage(msgCtx)
packet, err := l.readPacket(msgCtx)
if err != nil {
return nil, err
}
result := &SimpleBindResult{
Controls: make([]Control, 0),
}
if len(packet.Children) == 3 {
for _, child := range packet.Children[2].Children {
decodedChild, decodeErr := DecodeControl(child)
if decodeErr != nil {
return nil, fmt.Errorf("failed to decode child control: %s", decodeErr)
}
result.Controls = append(result.Controls, decodedChild)
}
}
err = GetLDAPError(packet)
return result, err
}
// Bind performs a bind with the given username and password.
//
// It does not allow unauthenticated bind (i.e. empty password). Use the UnauthenticatedBind method
// for that.
func (l *Conn) Bind(username, password string) error {
req := &SimpleBindRequest{
Username: username,
Password: password,
AllowEmptyPassword: false,
}
_, err := l.SimpleBind(req)
return err
}
// UnauthenticatedBind performs an unauthenticated bind.
//
// A username may be provided for trace (e.g. logging) purposes only, but it is normally not
// authenticated or otherwise validated by the LDAP server.
//
// See https://tools.ietf.org/html/rfc4513#section-5.1.2 .
// See https://tools.ietf.org/html/rfc4513#section-6.3.1 .
func (l *Conn) UnauthenticatedBind(username string) error {
req := &SimpleBindRequest{
Username: username,
Password: "",
AllowEmptyPassword: true,
}
_, err := l.SimpleBind(req)
return err
}
var externalBindRequest = requestFunc(func(envelope *ber.Packet) error {
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "User Name"))
saslAuth := ber.Encode(ber.ClassContext, ber.TypeConstructed, 3, "", "authentication")
saslAuth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "EXTERNAL", "SASL Mech"))
saslAuth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "SASL Cred"))
pkt.AppendChild(saslAuth)
envelope.AppendChild(pkt)
return nil
})
// ExternalBind performs SASL/EXTERNAL authentication.
//
// Use ldap.DialURL("ldapi://") to connect to the Unix socket before ExternalBind.
//
// See https://tools.ietf.org/html/rfc4422#appendix-A
func (l *Conn) ExternalBind() error {
msgCtx, err := l.doRequest(externalBindRequest)
if err != nil {
return err
}
defer l.finishMessage(msgCtx)
packet, err := l.readPacket(msgCtx)
if err != nil {
return err
}
return GetLDAPError(packet)
}

30
vendor/gopkg.in/ldap.v3/client.go generated vendored Normal file
View File

@ -0,0 +1,30 @@
package ldap
import (
"crypto/tls"
"time"
)
// Client knows how to interact with an LDAP server
type Client interface {
Start()
StartTLS(*tls.Config) error
Close()
SetTimeout(time.Duration)
Bind(username, password string) error
UnauthenticatedBind(username string) error
SimpleBind(*SimpleBindRequest) (*SimpleBindResult, error)
ExternalBind() error
Add(*AddRequest) error
Del(*DelRequest) error
Modify(*ModifyRequest) error
ModifyDN(*ModifyDNRequest) error
Compare(dn, attribute, value string) (bool, error)
PasswordModify(*PasswordModifyRequest) (*PasswordModifyResult, error)
Search(*SearchRequest) (*SearchResult, error)
SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error)
}

80
vendor/gopkg.in/ldap.v3/compare.go generated vendored Normal file
View File

@ -0,0 +1,80 @@
// File contains Compare functionality
//
// https://tools.ietf.org/html/rfc4511
//
// CompareRequest ::= [APPLICATION 14] SEQUENCE {
// entry LDAPDN,
// ava AttributeValueAssertion }
//
// AttributeValueAssertion ::= SEQUENCE {
// attributeDesc AttributeDescription,
// assertionValue AssertionValue }
//
// AttributeDescription ::= LDAPString
// -- Constrained to <attributedescription>
// -- [RFC4512]
//
// AttributeValue ::= OCTET STRING
//
package ldap
import (
"fmt"
ber "gopkg.in/asn1-ber.v1"
)
// CompareRequest represents an LDAP CompareRequest operation.
type CompareRequest struct {
DN string
Attribute string
Value string
}
func (req *CompareRequest) appendTo(envelope *ber.Packet) error {
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationCompareRequest, nil, "Compare Request")
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN"))
ava := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "AttributeValueAssertion")
ava.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.Attribute, "AttributeDesc"))
ava.AppendChild(ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.Value, "AssertionValue"))
pkt.AppendChild(ava)
envelope.AppendChild(pkt)
return nil
}
// Compare checks whether the attribute of the dn matches value. It returns true
// if it does and false otherwise, along with any error that occurred.
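// Illustrative usage (comment added for clarity; the DN and values are placeholders):
//
//	ok, err := l.Compare("uid=user,dc=example,dc=com", "mail", "user@example.com")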
func (l *Conn) Compare(dn, attribute, value string) (bool, error) {
msgCtx, err := l.doRequest(&CompareRequest{
DN: dn,
Attribute: attribute,
Value: value})
if err != nil {
return false, err
}
defer l.finishMessage(msgCtx)
packet, err := l.readPacket(msgCtx)
if err != nil {
return false, err
}
if packet.Children[1].Tag == ApplicationCompareResponse {
err := GetLDAPError(packet)
switch {
case IsErrorWithCode(err, LDAPResultCompareTrue):
return true, nil
case IsErrorWithCode(err, LDAPResultCompareFalse):
return false, nil
default:
return false, err
}
}
return false, fmt.Errorf("unexpected Response: %d", packet.Children[1].Tag)
}

522
vendor/gopkg.in/ldap.v3/conn.go generated vendored Normal file
View File

@ -0,0 +1,522 @@
package ldap
import (
"crypto/tls"
"errors"
"fmt"
"log"
"net"
"net/url"
"sync"
"sync/atomic"
"time"
ber "gopkg.in/asn1-ber.v1"
)
const (
// MessageQuit causes the processMessages loop to exit
MessageQuit = 0
// MessageRequest sends a request to the server
MessageRequest = 1
// MessageResponse receives a response from the server
MessageResponse = 2
// MessageFinish indicates the client considers a particular message ID to be finished
MessageFinish = 3
// MessageTimeout indicates the client-specified timeout for a particular message ID has been reached
MessageTimeout = 4
)
const (
// DefaultLdapPort default ldap port for pure TCP connection
DefaultLdapPort = "389"
// DefaultLdapsPort default ldap port for SSL connection
DefaultLdapsPort = "636"
)
// PacketResponse contains the packet or error encountered reading a response
type PacketResponse struct {
// Packet is the packet read from the server
Packet *ber.Packet
// Error is an error encountered while reading
Error error
}
// ReadPacket returns the packet or an error
func (pr *PacketResponse) ReadPacket() (*ber.Packet, error) {
if (pr == nil) || (pr.Packet == nil && pr.Error == nil) {
return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve response"))
}
return pr.Packet, pr.Error
}
type messageContext struct {
id int64
// close(done) should only be called from finishMessage()
done chan struct{}
// close(responses) should only be called from processMessages(), and only sent to from sendResponse()
responses chan *PacketResponse
}
// sendResponse should only be called within the processMessages() loop which
// is also responsible for closing the responses channel.
func (msgCtx *messageContext) sendResponse(packet *PacketResponse) {
select {
case msgCtx.responses <- packet:
// Successfully sent packet to message handler.
case <-msgCtx.done:
// The request handler is done and will not receive more
// packets.
}
}
type messagePacket struct {
Op int
MessageID int64
Packet *ber.Packet
Context *messageContext
}
type sendMessageFlags uint
const (
startTLS sendMessageFlags = 1 << iota
)
// Conn represents an LDAP Connection
type Conn struct {
// requestTimeout is loaded atomically
// so we need to ensure 64-bit alignment on 32-bit platforms.
requestTimeout int64
conn net.Conn
isTLS bool
closing uint32
closeErr atomic.Value
isStartingTLS bool
Debug debugging
chanConfirm chan struct{}
messageContexts map[int64]*messageContext
chanMessage chan *messagePacket
chanMessageID chan int64
wgClose sync.WaitGroup
outstandingRequests uint
messageMutex sync.Mutex
}
var _ Client = &Conn{}
// DefaultTimeout is a package-level variable that sets the timeout value
// used for the Dial and DialTLS methods.
//
// WARNING: since this is a package-level variable, setting this value from
// multiple places will probably result in undesired behaviour.
var DefaultTimeout = 60 * time.Second
// Dial connects to the given address on the given network using net.Dial
// and then returns a new Conn for the connection.
func Dial(network, addr string) (*Conn, error) {
c, err := net.DialTimeout(network, addr, DefaultTimeout)
if err != nil {
return nil, NewError(ErrorNetwork, err)
}
conn := NewConn(c, false)
conn.Start()
return conn, nil
}
// DialTLS connects to the given address on the given network using tls.Dial
// and then returns a new Conn for the connection.
func DialTLS(network, addr string, config *tls.Config) (*Conn, error) {
c, err := tls.DialWithDialer(&net.Dialer{Timeout: DefaultTimeout}, network, addr, config)
if err != nil {
return nil, NewError(ErrorNetwork, err)
}
conn := NewConn(c, true)
conn.Start()
return conn, nil
}
// DialURL connects to the given LDAP URL via TCP, using tls.Dial when the
// ldaps:// scheme is specified and net.Dial for ldap:// (ldapi:// connects over
// a Unix domain socket). On success a new Conn for the connection is returned.
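// Illustrative calls (comment added for clarity; hosts and paths are placeholders):
//
//	conn, err := DialURL("ldap://ldap.example.com")      // plain TCP, port 389 by default
//	conn, err := DialURL("ldaps://ldap.example.com")     // TLS, port 636 by default
//	conn, err := DialURL("ldapi:///var/run/slapd/ldapi") // Unix domain socket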
func DialURL(addr string) (*Conn, error) {
lurl, err := url.Parse(addr)
if err != nil {
return nil, NewError(ErrorNetwork, err)
}
host, port, err := net.SplitHostPort(lurl.Host)
if err != nil {
// we assume the error is due to a missing port
host = lurl.Host
port = ""
}
switch lurl.Scheme {
case "ldapi":
if lurl.Path == "" || lurl.Path == "/" {
lurl.Path = "/var/run/slapd/ldapi"
}
return Dial("unix", lurl.Path)
case "ldap":
if port == "" {
port = DefaultLdapPort
}
return Dial("tcp", net.JoinHostPort(host, port))
case "ldaps":
if port == "" {
port = DefaultLdapsPort
}
tlsConf := &tls.Config{
ServerName: host,
}
return DialTLS("tcp", net.JoinHostPort(host, port), tlsConf)
}
return nil, NewError(ErrorNetwork, fmt.Errorf("Unknown scheme '%s'", lurl.Scheme))
}
// NewConn returns a new Conn using conn for network I/O.
func NewConn(conn net.Conn, isTLS bool) *Conn {
return &Conn{
conn: conn,
chanConfirm: make(chan struct{}),
chanMessageID: make(chan int64),
chanMessage: make(chan *messagePacket, 10),
messageContexts: map[int64]*messageContext{},
requestTimeout: 0,
isTLS: isTLS,
}
}
// Start initializes goroutines to read responses and process messages
func (l *Conn) Start() {
go l.reader()
go l.processMessages()
l.wgClose.Add(1)
}
// IsClosing returns whether or not we're currently closing.
func (l *Conn) IsClosing() bool {
return atomic.LoadUint32(&l.closing) == 1
}
// setClosing sets the closing value to true
func (l *Conn) setClosing() bool {
return atomic.CompareAndSwapUint32(&l.closing, 0, 1)
}
// Close closes the connection.
func (l *Conn) Close() {
l.messageMutex.Lock()
defer l.messageMutex.Unlock()
if l.setClosing() {
l.Debug.Printf("Sending quit message and waiting for confirmation")
l.chanMessage <- &messagePacket{Op: MessageQuit}
<-l.chanConfirm
close(l.chanMessage)
l.Debug.Printf("Closing network connection")
if err := l.conn.Close(); err != nil {
log.Println(err)
}
l.wgClose.Done()
}
l.wgClose.Wait()
}
// SetTimeout sets the time after a request is sent that a MessageTimeout triggers
func (l *Conn) SetTimeout(timeout time.Duration) {
if timeout > 0 {
atomic.StoreInt64(&l.requestTimeout, int64(timeout))
}
}
// Returns the next available messageID
func (l *Conn) nextMessageID() int64 {
if messageID, ok := <-l.chanMessageID; ok {
return messageID
}
return 0
}
// StartTLS sends the command to start a TLS session and then creates a new TLS Client
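// Illustrative usage after Dial (comment added for clarity; ServerName is a placeholder):
//
//	err := l.StartTLS(&tls.Config{ServerName: "ldap.example.com"})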
func (l *Conn) StartTLS(config *tls.Config) error {
if l.isTLS {
return NewError(ErrorNetwork, errors.New("ldap: already encrypted"))
}
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Start TLS")
request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, "1.3.6.1.4.1.1466.20037", "TLS Extended Command"))
packet.AppendChild(request)
l.Debug.PrintPacket(packet)
msgCtx, err := l.sendMessageWithFlags(packet, startTLS)
if err != nil {
return err
}
defer l.finishMessage(msgCtx)
l.Debug.Printf("%d: waiting for response", msgCtx.id)
packetResponse, ok := <-msgCtx.responses
if !ok {
return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
}
packet, err = packetResponse.ReadPacket()
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
if err != nil {
return err
}
if l.Debug {
if err := addLDAPDescriptions(packet); err != nil {
l.Close()
return err
}
ber.PrintPacket(packet)
}
if err := GetLDAPError(packet); err == nil {
conn := tls.Client(l.conn, config)
if connErr := conn.Handshake(); connErr != nil {
l.Close()
return NewError(ErrorNetwork, fmt.Errorf("TLS handshake failed (%v)", connErr))
}
l.isTLS = true
l.conn = conn
} else {
return err
}
go l.reader()
return nil
}
// TLSConnectionState returns the client's TLS connection state.
// The return values are their zero values if StartTLS did
// not succeed.
func (l *Conn) TLSConnectionState() (state tls.ConnectionState, ok bool) {
tc, ok := l.conn.(*tls.Conn)
if !ok {
return
}
return tc.ConnectionState(), true
}
func (l *Conn) sendMessage(packet *ber.Packet) (*messageContext, error) {
return l.sendMessageWithFlags(packet, 0)
}
func (l *Conn) sendMessageWithFlags(packet *ber.Packet, flags sendMessageFlags) (*messageContext, error) {
if l.IsClosing() {
return nil, NewError(ErrorNetwork, errors.New("ldap: connection closed"))
}
l.messageMutex.Lock()
l.Debug.Printf("flags&startTLS = %d", flags&startTLS)
if l.isStartingTLS {
l.messageMutex.Unlock()
return nil, NewError(ErrorNetwork, errors.New("ldap: connection is in startls phase"))
}
if flags&startTLS != 0 {
if l.outstandingRequests != 0 {
l.messageMutex.Unlock()
return nil, NewError(ErrorNetwork, errors.New("ldap: cannot StartTLS with outstanding requests"))
}
l.isStartingTLS = true
}
l.outstandingRequests++
l.messageMutex.Unlock()
responses := make(chan *PacketResponse)
messageID := packet.Children[0].Value.(int64)
message := &messagePacket{
Op: MessageRequest,
MessageID: messageID,
Packet: packet,
Context: &messageContext{
id: messageID,
done: make(chan struct{}),
responses: responses,
},
}
l.sendProcessMessage(message)
return message.Context, nil
}
func (l *Conn) finishMessage(msgCtx *messageContext) {
close(msgCtx.done)
if l.IsClosing() {
return
}
l.messageMutex.Lock()
l.outstandingRequests--
if l.isStartingTLS {
l.isStartingTLS = false
}
l.messageMutex.Unlock()
message := &messagePacket{
Op: MessageFinish,
MessageID: msgCtx.id,
}
l.sendProcessMessage(message)
}
func (l *Conn) sendProcessMessage(message *messagePacket) bool {
l.messageMutex.Lock()
defer l.messageMutex.Unlock()
if l.IsClosing() {
return false
}
l.chanMessage <- message
return true
}
func (l *Conn) processMessages() {
defer func() {
if err := recover(); err != nil {
log.Printf("ldap: recovered panic in processMessages: %v", err)
}
for messageID, msgCtx := range l.messageContexts {
// If we are closing due to an error, inform anyone who
// is waiting about the error.
if l.IsClosing() && l.closeErr.Load() != nil {
msgCtx.sendResponse(&PacketResponse{Error: l.closeErr.Load().(error)})
}
l.Debug.Printf("Closing channel for MessageID %d", messageID)
close(msgCtx.responses)
delete(l.messageContexts, messageID)
}
close(l.chanMessageID)
close(l.chanConfirm)
}()
var messageID int64 = 1
for {
select {
case l.chanMessageID <- messageID:
messageID++
case message := <-l.chanMessage:
switch message.Op {
case MessageQuit:
l.Debug.Printf("Shutting down - quit message received")
return
case MessageRequest:
// Add to message list and write to network
l.Debug.Printf("Sending message %d", message.MessageID)
buf := message.Packet.Bytes()
_, err := l.conn.Write(buf)
if err != nil {
l.Debug.Printf("Error Sending Message: %s", err.Error())
message.Context.sendResponse(&PacketResponse{Error: fmt.Errorf("unable to send request: %s", err)})
close(message.Context.responses)
break
}
// Only add to messageContexts if we were able to
// successfully write the message.
l.messageContexts[message.MessageID] = message.Context
// Add timeout if defined
requestTimeout := time.Duration(atomic.LoadInt64(&l.requestTimeout))
if requestTimeout > 0 {
go func() {
defer func() {
if err := recover(); err != nil {
log.Printf("ldap: recovered panic in RequestTimeout: %v", err)
}
}()
time.Sleep(requestTimeout)
timeoutMessage := &messagePacket{
Op: MessageTimeout,
MessageID: message.MessageID,
}
l.sendProcessMessage(timeoutMessage)
}()
}
case MessageResponse:
l.Debug.Printf("Receiving message %d", message.MessageID)
if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
msgCtx.sendResponse(&PacketResponse{message.Packet, nil})
} else {
log.Printf("Received unexpected message %d, %v", message.MessageID, l.IsClosing())
ber.PrintPacket(message.Packet)
}
case MessageTimeout:
// Handle the timeout by closing the channel
// All reads will return immediately
if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
l.Debug.Printf("Receiving message timeout for %d", message.MessageID)
msgCtx.sendResponse(&PacketResponse{message.Packet, errors.New("ldap: connection timed out")})
delete(l.messageContexts, message.MessageID)
close(msgCtx.responses)
}
case MessageFinish:
l.Debug.Printf("Finished message %d", message.MessageID)
if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
delete(l.messageContexts, message.MessageID)
close(msgCtx.responses)
}
}
}
}
}
func (l *Conn) reader() {
cleanstop := false
defer func() {
if err := recover(); err != nil {
log.Printf("ldap: recovered panic in reader: %v", err)
}
if !cleanstop {
l.Close()
}
}()
for {
if cleanstop {
l.Debug.Printf("reader clean stopping (without closing the connection)")
return
}
packet, err := ber.ReadPacket(l.conn)
if err != nil {
// A read error is expected here if we are closing the connection...
if !l.IsClosing() {
l.closeErr.Store(fmt.Errorf("unable to read LDAP response packet: %s", err))
l.Debug.Printf("reader error: %s", err)
}
return
}
if err := addLDAPDescriptions(packet); err != nil {
l.Debug.Printf("descriptions error: %s", err)
}
if len(packet.Children) == 0 {
l.Debug.Printf("Received bad ldap packet")
continue
}
l.messageMutex.Lock()
if l.isStartingTLS {
cleanstop = true
}
l.messageMutex.Unlock()
message := &messagePacket{
Op: MessageResponse,
MessageID: packet.Children[0].Value.(int64),
Packet: packet,
}
if !l.sendProcessMessage(message) {
return
}
}
}
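
A minimal usage sketch for the request plumbing above, assuming go-ldap v3's `Dial` and `SetTimeout` helpers from the earlier, unshown part of conn.go; the hostname is a placeholder.

```go
package ldapexample // hypothetical package used only for these sketches

import (
	"log"
	"time"

	ldap "gopkg.in/ldap.v3"
)

// dialWithTimeout opens a connection and arms the per-request timeout that the
// MessageTimeout branch above enforces.
func dialWithTimeout() *ldap.Conn {
	conn, err := ldap.Dial("tcp", "ldap.example.com:389") // placeholder host
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	// After 10s without a reply, processMessages closes the waiting response
	// channel and the caller receives "ldap: connection timed out".
	conn.SetTimeout(10 * time.Second)
	return conn
}
```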

499
vendor/gopkg.in/ldap.v3/control.go generated vendored Normal file

@@ -0,0 +1,499 @@
package ldap
import (
"fmt"
"strconv"
"gopkg.in/asn1-ber.v1"
)
const (
// ControlTypePaging - https://www.ietf.org/rfc/rfc2696.txt
ControlTypePaging = "1.2.840.113556.1.4.319"
// ControlTypeBeheraPasswordPolicy - https://tools.ietf.org/html/draft-behera-ldap-password-policy-10
ControlTypeBeheraPasswordPolicy = "1.3.6.1.4.1.42.2.27.8.5.1"
// ControlTypeVChuPasswordMustChange - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
ControlTypeVChuPasswordMustChange = "2.16.840.1.113730.3.4.4"
// ControlTypeVChuPasswordWarning - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
ControlTypeVChuPasswordWarning = "2.16.840.1.113730.3.4.5"
// ControlTypeManageDsaIT - https://tools.ietf.org/html/rfc3296
ControlTypeManageDsaIT = "2.16.840.1.113730.3.4.2"
// ControlTypeMicrosoftNotification - https://msdn.microsoft.com/en-us/library/aa366983(v=vs.85).aspx
ControlTypeMicrosoftNotification = "1.2.840.113556.1.4.528"
// ControlTypeMicrosoftShowDeleted - https://msdn.microsoft.com/en-us/library/aa366989(v=vs.85).aspx
ControlTypeMicrosoftShowDeleted = "1.2.840.113556.1.4.417"
)
// ControlTypeMap maps controls to text descriptions
var ControlTypeMap = map[string]string{
ControlTypePaging: "Paging",
ControlTypeBeheraPasswordPolicy: "Password Policy - Behera Draft",
ControlTypeManageDsaIT: "Manage DSA IT",
ControlTypeMicrosoftNotification: "Change Notification - Microsoft",
ControlTypeMicrosoftShowDeleted: "Show Deleted Objects - Microsoft",
}
// Control defines an interface controls provide to encode and describe themselves
type Control interface {
// GetControlType returns the OID
GetControlType() string
// Encode returns the ber packet representation
Encode() *ber.Packet
// String returns a human-readable description
String() string
}
// ControlString implements the Control interface for simple controls
type ControlString struct {
ControlType string
Criticality bool
ControlValue string
}
// GetControlType returns the OID
func (c *ControlString) GetControlType() string {
return c.ControlType
}
// Encode returns the ber packet representation
func (c *ControlString) Encode() *ber.Packet {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, c.ControlType, "Control Type ("+ControlTypeMap[c.ControlType]+")"))
if c.Criticality {
packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality"))
}
if c.ControlValue != "" {
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, string(c.ControlValue), "Control Value"))
}
return packet
}
// String returns a human-readable description
func (c *ControlString) String() string {
return fmt.Sprintf("Control Type: %s (%q) Criticality: %t Control Value: %s", ControlTypeMap[c.ControlType], c.ControlType, c.Criticality, c.ControlValue)
}
// ControlPaging implements the paging control described in https://www.ietf.org/rfc/rfc2696.txt
type ControlPaging struct {
// PagingSize indicates the page size
PagingSize uint32
// Cookie is an opaque value returned by the server to track a paging cursor
Cookie []byte
}
// GetControlType returns the OID
func (c *ControlPaging) GetControlType() string {
return ControlTypePaging
}
// Encode returns the ber packet representation
func (c *ControlPaging) Encode() *ber.Packet {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypePaging, "Control Type ("+ControlTypeMap[ControlTypePaging]+")"))
p2 := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Control Value (Paging)")
seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Search Control Value")
seq.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, int64(c.PagingSize), "Paging Size"))
cookie := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Cookie")
cookie.Value = c.Cookie
cookie.Data.Write(c.Cookie)
seq.AppendChild(cookie)
p2.AppendChild(seq)
packet.AppendChild(p2)
return packet
}
// String returns a human-readable description
func (c *ControlPaging) String() string {
return fmt.Sprintf(
"Control Type: %s (%q) Criticality: %t PagingSize: %d Cookie: %q",
ControlTypeMap[ControlTypePaging],
ControlTypePaging,
false,
c.PagingSize,
c.Cookie)
}
// SetCookie stores the given cookie in the paging control
func (c *ControlPaging) SetCookie(cookie []byte) {
c.Cookie = cookie
}
// ControlBeheraPasswordPolicy implements the control described in https://tools.ietf.org/html/draft-behera-ldap-password-policy-10
type ControlBeheraPasswordPolicy struct {
// Expire contains the number of seconds before a password will expire
Expire int64
// Grace indicates the remaining number of times a user will be allowed to authenticate with an expired password
Grace int64
// Error indicates the error code
Error int8
// ErrorString is a human readable error
ErrorString string
}
// GetControlType returns the OID
func (c *ControlBeheraPasswordPolicy) GetControlType() string {
return ControlTypeBeheraPasswordPolicy
}
// Encode returns the ber packet representation
func (c *ControlBeheraPasswordPolicy) Encode() *ber.Packet {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeBeheraPasswordPolicy, "Control Type ("+ControlTypeMap[ControlTypeBeheraPasswordPolicy]+")"))
return packet
}
// String returns a human-readable description
func (c *ControlBeheraPasswordPolicy) String() string {
return fmt.Sprintf(
"Control Type: %s (%q) Criticality: %t Expire: %d Grace: %d Error: %d, ErrorString: %s",
ControlTypeMap[ControlTypeBeheraPasswordPolicy],
ControlTypeBeheraPasswordPolicy,
false,
c.Expire,
c.Grace,
c.Error,
c.ErrorString)
}
// ControlVChuPasswordMustChange implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
type ControlVChuPasswordMustChange struct {
// MustChange indicates if the password is required to be changed
MustChange bool
}
// GetControlType returns the OID
func (c *ControlVChuPasswordMustChange) GetControlType() string {
return ControlTypeVChuPasswordMustChange
}
// Encode returns the ber packet representation
func (c *ControlVChuPasswordMustChange) Encode() *ber.Packet {
return nil
}
// String returns a human-readable description
func (c *ControlVChuPasswordMustChange) String() string {
return fmt.Sprintf(
"Control Type: %s (%q) Criticality: %t MustChange: %v",
ControlTypeMap[ControlTypeVChuPasswordMustChange],
ControlTypeVChuPasswordMustChange,
false,
c.MustChange)
}
// ControlVChuPasswordWarning implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
type ControlVChuPasswordWarning struct {
// Expire indicates the time in seconds until the password expires
Expire int64
}
// GetControlType returns the OID
func (c *ControlVChuPasswordWarning) GetControlType() string {
return ControlTypeVChuPasswordWarning
}
// Encode returns the ber packet representation
func (c *ControlVChuPasswordWarning) Encode() *ber.Packet {
return nil
}
// String returns a human-readable description
func (c *ControlVChuPasswordWarning) String() string {
return fmt.Sprintf(
"Control Type: %s (%q) Criticality: %t Expire: %b",
ControlTypeMap[ControlTypeVChuPasswordWarning],
ControlTypeVChuPasswordWarning,
false,
c.Expire)
}
// ControlManageDsaIT implements the control described in https://tools.ietf.org/html/rfc3296
type ControlManageDsaIT struct {
// Criticality indicates if this control is required
Criticality bool
}
// GetControlType returns the OID
func (c *ControlManageDsaIT) GetControlType() string {
return ControlTypeManageDsaIT
}
// Encode returns the ber packet representation
func (c *ControlManageDsaIT) Encode() *ber.Packet {
//FIXME
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeManageDsaIT, "Control Type ("+ControlTypeMap[ControlTypeManageDsaIT]+")"))
if c.Criticality {
packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality"))
}
return packet
}
// String returns a human-readable description
func (c *ControlManageDsaIT) String() string {
return fmt.Sprintf(
"Control Type: %s (%q) Criticality: %t",
ControlTypeMap[ControlTypeManageDsaIT],
ControlTypeManageDsaIT,
c.Criticality)
}
// NewControlManageDsaIT returns a ControlManageDsaIT control
func NewControlManageDsaIT(Criticality bool) *ControlManageDsaIT {
return &ControlManageDsaIT{Criticality: Criticality}
}
// ControlMicrosoftNotification implements the control described in https://msdn.microsoft.com/en-us/library/aa366983(v=vs.85).aspx
type ControlMicrosoftNotification struct{}
// GetControlType returns the OID
func (c *ControlMicrosoftNotification) GetControlType() string {
return ControlTypeMicrosoftNotification
}
// Encode returns the ber packet representation
func (c *ControlMicrosoftNotification) Encode() *ber.Packet {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeMicrosoftNotification, "Control Type ("+ControlTypeMap[ControlTypeMicrosoftNotification]+")"))
return packet
}
// String returns a human-readable description
func (c *ControlMicrosoftNotification) String() string {
return fmt.Sprintf(
"Control Type: %s (%q)",
ControlTypeMap[ControlTypeMicrosoftNotification],
ControlTypeMicrosoftNotification)
}
// NewControlMicrosoftNotification returns a ControlMicrosoftNotification control
func NewControlMicrosoftNotification() *ControlMicrosoftNotification {
return &ControlMicrosoftNotification{}
}
// ControlMicrosoftShowDeleted implements the control described in https://msdn.microsoft.com/en-us/library/aa366989(v=vs.85).aspx
type ControlMicrosoftShowDeleted struct{}
// GetControlType returns the OID
func (c *ControlMicrosoftShowDeleted) GetControlType() string {
return ControlTypeMicrosoftShowDeleted
}
// Encode returns the ber packet representation
func (c *ControlMicrosoftShowDeleted) Encode() *ber.Packet {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeMicrosoftShowDeleted, "Control Type ("+ControlTypeMap[ControlTypeMicrosoftShowDeleted]+")"))
return packet
}
// String returns a human-readable description
func (c *ControlMicrosoftShowDeleted) String() string {
return fmt.Sprintf(
"Control Type: %s (%q)",
ControlTypeMap[ControlTypeMicrosoftShowDeleted],
ControlTypeMicrosoftShowDeleted)
}
// NewControlMicrosoftShowDeleted returns a ControlMicrosoftShowDeleted control
func NewControlMicrosoftShowDeleted() *ControlMicrosoftShowDeleted {
return &ControlMicrosoftShowDeleted{}
}
// FindControl returns the first control of the given type in the list, or nil
func FindControl(controls []Control, controlType string) Control {
for _, c := range controls {
if c.GetControlType() == controlType {
return c
}
}
return nil
}
// DecodeControl returns a control read from the given packet, or nil if no recognized control can be made
func DecodeControl(packet *ber.Packet) (Control, error) {
var (
ControlType = ""
Criticality = false
value *ber.Packet
)
switch len(packet.Children) {
case 0:
// at least one child is required for control type
return nil, fmt.Errorf("at least one child is required for control type")
case 1:
// just type, no criticality or value
packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
ControlType = packet.Children[0].Value.(string)
case 2:
packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
ControlType = packet.Children[0].Value.(string)
// Children[1] could be criticality or value (both are optional)
// duck-type on whether this is a boolean
if _, ok := packet.Children[1].Value.(bool); ok {
packet.Children[1].Description = "Criticality"
Criticality = packet.Children[1].Value.(bool)
} else {
packet.Children[1].Description = "Control Value"
value = packet.Children[1]
}
case 3:
packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
ControlType = packet.Children[0].Value.(string)
packet.Children[1].Description = "Criticality"
Criticality = packet.Children[1].Value.(bool)
packet.Children[2].Description = "Control Value"
value = packet.Children[2]
default:
// more than 3 children is invalid
return nil, fmt.Errorf("more than 3 children is invalid for controls")
}
switch ControlType {
case ControlTypeManageDsaIT:
return NewControlManageDsaIT(Criticality), nil
case ControlTypePaging:
value.Description += " (Paging)"
c := new(ControlPaging)
if value.Value != nil {
valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
if err != nil {
return nil, fmt.Errorf("failed to decode data bytes: %s", err)
}
value.Data.Truncate(0)
value.Value = nil
value.AppendChild(valueChildren)
}
value = value.Children[0]
value.Description = "Search Control Value"
value.Children[0].Description = "Paging Size"
value.Children[1].Description = "Cookie"
c.PagingSize = uint32(value.Children[0].Value.(int64))
c.Cookie = value.Children[1].Data.Bytes()
value.Children[1].Value = c.Cookie
return c, nil
case ControlTypeBeheraPasswordPolicy:
value.Description += " (Password Policy - Behera)"
c := NewControlBeheraPasswordPolicy()
if value.Value != nil {
valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
if err != nil {
return nil, fmt.Errorf("failed to decode data bytes: %s", err)
}
value.Data.Truncate(0)
value.Value = nil
value.AppendChild(valueChildren)
}
sequence := value.Children[0]
for _, child := range sequence.Children {
if child.Tag == 0 {
//Warning
warningPacket := child.Children[0]
packet, err := ber.DecodePacketErr(warningPacket.Data.Bytes())
if err != nil {
return nil, fmt.Errorf("failed to decode data bytes: %s", err)
}
val, ok := packet.Value.(int64)
if ok {
if warningPacket.Tag == 0 {
//timeBeforeExpiration
c.Expire = val
warningPacket.Value = c.Expire
} else if warningPacket.Tag == 1 {
//graceAuthNsRemaining
c.Grace = val
warningPacket.Value = c.Grace
}
}
} else if child.Tag == 1 {
// Error
packet, err := ber.DecodePacketErr(child.Data.Bytes())
if err != nil {
return nil, fmt.Errorf("failed to decode data bytes: %s", err)
}
val, ok := packet.Value.(int8)
if !ok {
// unable to decode the error code; fall back to -1 (unknown)
val = -1
}
c.Error = val
child.Value = c.Error
c.ErrorString = BeheraPasswordPolicyErrorMap[c.Error]
}
}
return c, nil
case ControlTypeVChuPasswordMustChange:
c := &ControlVChuPasswordMustChange{MustChange: true}
return c, nil
case ControlTypeVChuPasswordWarning:
c := &ControlVChuPasswordWarning{Expire: -1}
expireStr := ber.DecodeString(value.Data.Bytes())
expire, err := strconv.ParseInt(expireStr, 10, 64)
if err != nil {
return nil, fmt.Errorf("failed to parse value as int: %s", err)
}
c.Expire = expire
value.Value = c.Expire
return c, nil
case ControlTypeMicrosoftNotification:
return NewControlMicrosoftNotification(), nil
case ControlTypeMicrosoftShowDeleted:
return NewControlMicrosoftShowDeleted(), nil
default:
c := new(ControlString)
c.ControlType = ControlType
c.Criticality = Criticality
if value != nil {
c.ControlValue = value.Value.(string)
}
return c, nil
}
}
// NewControlString returns a generic control
func NewControlString(controlType string, criticality bool, controlValue string) *ControlString {
return &ControlString{
ControlType: controlType,
Criticality: criticality,
ControlValue: controlValue,
}
}
// NewControlPaging returns a paging control
func NewControlPaging(pagingSize uint32) *ControlPaging {
return &ControlPaging{PagingSize: pagingSize}
}
// NewControlBeheraPasswordPolicy returns a ControlBeheraPasswordPolicy
func NewControlBeheraPasswordPolicy() *ControlBeheraPasswordPolicy {
return &ControlBeheraPasswordPolicy{
Expire: -1,
Grace: -1,
Error: -1,
}
}
func encodeControls(controls []Control) *ber.Packet {
packet := ber.Encode(ber.ClassContext, ber.TypeConstructed, 0, nil, "Controls")
for _, control := range controls {
packet.AppendChild(control.Encode())
}
return packet
}
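
A paging sketch for the `ControlPaging` defined above, assuming the library's `SearchWithPaging` helper from search.go (not part of this hunk) and an already-bound connection; the base DN and filter are placeholders.

```go
package ldapexample

import (
	"fmt"
	"log"

	ldap "gopkg.in/ldap.v3"
)

// pagedSearch issues a search in pages of 100 entries. Each round trip carries a
// ControlPaging; the loop inside SearchWithPaging stops once the server returns
// an empty cookie (compare ControlPaging.SetCookie above).
func pagedSearch(conn *ldap.Conn) {
	req := ldap.NewSearchRequest(
		"dc=example,dc=com", // placeholder base DN
		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases,
		0, 0, false,
		"(objectClass=person)",
		[]string{"cn"},
		nil,
	)
	res, err := conn.SearchWithPaging(req, 100)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("fetched %d entries\n", len(res.Entries))
}
```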

30
vendor/gopkg.in/ldap.v3/debug.go generated vendored Normal file

@@ -0,0 +1,30 @@
package ldap
import (
"log"
ber "gopkg.in/asn1-ber.v1"
)
// debugging type
// - has a Printf method to write the debug output
type debugging bool
// Enable controls debugging mode.
func (debug *debugging) Enable(b bool) {
*debug = debugging(b)
}
// Printf writes debug output.
func (debug debugging) Printf(format string, args ...interface{}) {
if debug {
log.Printf(format, args...)
}
}
// PrintPacket dumps a packet.
func (debug debugging) PrintPacket(packet *ber.Packet) {
if debug {
ber.PrintPacket(packet)
}
}
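
A one-line sketch of how this debugging type is switched on from application code, assuming `Debug` is the exported field of that type on `*ldap.Conn`, as in upstream go-ldap:

```go
package ldapexample

import ldap "gopkg.in/ldap.v3"

// enableWireDebug makes Printf and PrintPacket above log every request and
// response packet on the connection.
func enableWireDebug(conn *ldap.Conn) {
	conn.Debug.Enable(true)
}
```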

64
vendor/gopkg.in/ldap.v3/del.go generated vendored Normal file

@@ -0,0 +1,64 @@
//
// https://tools.ietf.org/html/rfc4511
//
// DelRequest ::= [APPLICATION 10] LDAPDN
package ldap
import (
"log"
ber "gopkg.in/asn1-ber.v1"
)
// DelRequest implements an LDAP deletion request
type DelRequest struct {
// DN is the name of the directory entry to delete
DN string
// Controls hold optional controls to send with the request
Controls []Control
}
func (req *DelRequest) appendTo(envelope *ber.Packet) error {
pkt := ber.Encode(ber.ClassApplication, ber.TypePrimitive, ApplicationDelRequest, req.DN, "Del Request")
pkt.Data.Write([]byte(req.DN))
envelope.AppendChild(pkt)
if len(req.Controls) > 0 {
envelope.AppendChild(encodeControls(req.Controls))
}
return nil
}
// NewDelRequest creates a delete request for the given DN and controls
func NewDelRequest(DN string, Controls []Control) *DelRequest {
return &DelRequest{
DN: DN,
Controls: Controls,
}
}
// Del executes the given delete request
func (l *Conn) Del(delRequest *DelRequest) error {
msgCtx, err := l.doRequest(delRequest)
if err != nil {
return err
}
defer l.finishMessage(msgCtx)
packet, err := l.readPacket(msgCtx)
if err != nil {
return err
}
if packet.Children[1].Tag == ApplicationDelResponse {
err := GetLDAPError(packet)
if err != nil {
return err
}
} else {
log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
}
return nil
}
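
A short sketch of the delete flow above; the DN is a placeholder and the connection is assumed to be bound already.

```go
package ldapexample

import ldap "gopkg.in/ldap.v3"

// deleteEntry builds a DelRequest with no controls and sends it. Del returns nil
// on success, or the *ldap.Error extracted from the DelResponse.
func deleteEntry(conn *ldap.Conn) error {
	del := ldap.NewDelRequest("cn=retired-service,ou=Users,dc=example,dc=com", nil)
	return conn.Del(del)
}
```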

247
vendor/gopkg.in/ldap.v3/dn.go generated vendored Normal file

@@ -0,0 +1,247 @@
// File contains DN parsing functionality
//
// https://tools.ietf.org/html/rfc4514
//
// distinguishedName = [ relativeDistinguishedName
// *( COMMA relativeDistinguishedName ) ]
// relativeDistinguishedName = attributeTypeAndValue
// *( PLUS attributeTypeAndValue )
// attributeTypeAndValue = attributeType EQUALS attributeValue
// attributeType = descr / numericoid
// attributeValue = string / hexstring
//
// ; The following characters are to be escaped when they appear
// ; in the value to be encoded: ESC, one of <escaped>, leading
// ; SHARP or SPACE, trailing SPACE, and NULL.
// string = [ ( leadchar / pair ) [ *( stringchar / pair )
// ( trailchar / pair ) ] ]
//
// leadchar = LUTF1 / UTFMB
// LUTF1 = %x01-1F / %x21 / %x24-2A / %x2D-3A /
// %x3D / %x3F-5B / %x5D-7F
//
// trailchar = TUTF1 / UTFMB
// TUTF1 = %x01-1F / %x21 / %x23-2A / %x2D-3A /
// %x3D / %x3F-5B / %x5D-7F
//
// stringchar = SUTF1 / UTFMB
// SUTF1 = %x01-21 / %x23-2A / %x2D-3A /
// %x3D / %x3F-5B / %x5D-7F
//
// pair = ESC ( ESC / special / hexpair )
// special = escaped / SPACE / SHARP / EQUALS
// escaped = DQUOTE / PLUS / COMMA / SEMI / LANGLE / RANGLE
// hexstring = SHARP 1*hexpair
// hexpair = HEX HEX
//
// where the productions <descr>, <numericoid>, <COMMA>, <DQUOTE>,
// <EQUALS>, <ESC>, <HEX>, <LANGLE>, <NULL>, <PLUS>, <RANGLE>, <SEMI>,
// <SPACE>, <SHARP>, and <UTFMB> are defined in [RFC4512].
//
package ldap
import (
"bytes"
enchex "encoding/hex"
"errors"
"fmt"
"strings"
"gopkg.in/asn1-ber.v1"
)
// AttributeTypeAndValue represents an attributeTypeAndValue from https://tools.ietf.org/html/rfc4514
type AttributeTypeAndValue struct {
// Type is the attribute type
Type string
// Value is the attribute value
Value string
}
// RelativeDN represents a relativeDistinguishedName from https://tools.ietf.org/html/rfc4514
type RelativeDN struct {
Attributes []*AttributeTypeAndValue
}
// DN represents a distinguishedName from https://tools.ietf.org/html/rfc4514
type DN struct {
RDNs []*RelativeDN
}
// ParseDN returns a distinguishedName or an error
func ParseDN(str string) (*DN, error) {
dn := new(DN)
dn.RDNs = make([]*RelativeDN, 0)
rdn := new(RelativeDN)
rdn.Attributes = make([]*AttributeTypeAndValue, 0)
buffer := bytes.Buffer{}
attribute := new(AttributeTypeAndValue)
escaping := false
unescapedTrailingSpaces := 0
stringFromBuffer := func() string {
s := buffer.String()
s = s[0 : len(s)-unescapedTrailingSpaces]
buffer.Reset()
unescapedTrailingSpaces = 0
return s
}
for i := 0; i < len(str); i++ {
char := str[i]
switch {
case escaping:
unescapedTrailingSpaces = 0
escaping = false
switch char {
case ' ', '"', '#', '+', ',', ';', '<', '=', '>', '\\':
buffer.WriteByte(char)
continue
}
// Not a special character, assume hex encoded octet
if len(str) == i+1 {
return nil, errors.New("got corrupted escaped character")
}
dst := []byte{0}
n, err := enchex.Decode([]byte(dst), []byte(str[i:i+2]))
if err != nil {
return nil, fmt.Errorf("failed to decode escaped character: %s", err)
} else if n != 1 {
return nil, fmt.Errorf("expected 1 byte when un-escaping, got %d", n)
}
buffer.WriteByte(dst[0])
i++
case char == '\\':
unescapedTrailingSpaces = 0
escaping = true
case char == '=':
attribute.Type = stringFromBuffer()
// Special case: If the first character in the value is # the
// following data is BER encoded so we can just fast forward
// and decode.
if len(str) > i+1 && str[i+1] == '#' {
i += 2
index := strings.IndexAny(str[i:], ",+")
data := str
if index > 0 {
data = str[i : i+index]
} else {
data = str[i:]
}
rawBER, err := enchex.DecodeString(data)
if err != nil {
return nil, fmt.Errorf("failed to decode BER encoding: %s", err)
}
packet, err := ber.DecodePacketErr(rawBER)
if err != nil {
return nil, fmt.Errorf("failed to decode BER packet: %s", err)
}
buffer.WriteString(packet.Data.String())
i += len(data) - 1
}
case char == ',' || char == '+':
// We're done with this RDN or value, push it
if len(attribute.Type) == 0 {
return nil, errors.New("incomplete type, value pair")
}
attribute.Value = stringFromBuffer()
rdn.Attributes = append(rdn.Attributes, attribute)
attribute = new(AttributeTypeAndValue)
if char == ',' {
dn.RDNs = append(dn.RDNs, rdn)
rdn = new(RelativeDN)
rdn.Attributes = make([]*AttributeTypeAndValue, 0)
}
case char == ' ' && buffer.Len() == 0:
// ignore unescaped leading spaces
continue
default:
if char == ' ' {
// Track unescaped spaces in case they are trailing and we need to remove them
unescapedTrailingSpaces++
} else {
// Reset if we see a non-space char
unescapedTrailingSpaces = 0
}
buffer.WriteByte(char)
}
}
if buffer.Len() > 0 {
if len(attribute.Type) == 0 {
return nil, errors.New("DN ended with incomplete type, value pair")
}
attribute.Value = stringFromBuffer()
rdn.Attributes = append(rdn.Attributes, attribute)
dn.RDNs = append(dn.RDNs, rdn)
}
return dn, nil
}
// Equal returns true if the DNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
// Returns true if they have the same number of relative distinguished names
// and corresponding relative distinguished names (by position) are the same.
func (d *DN) Equal(other *DN) bool {
if len(d.RDNs) != len(other.RDNs) {
return false
}
for i := range d.RDNs {
if !d.RDNs[i].Equal(other.RDNs[i]) {
return false
}
}
return true
}
// AncestorOf returns true if the other DN consists of at least one RDN followed by all the RDNs of the current DN.
// "ou=widgets,o=acme.com" is an ancestor of "ou=sprockets,ou=widgets,o=acme.com"
// "ou=widgets,o=acme.com" is not an ancestor of "ou=sprockets,ou=widgets,o=foo.com"
// "ou=widgets,o=acme.com" is not an ancestor of "ou=widgets,o=acme.com"
func (d *DN) AncestorOf(other *DN) bool {
if len(d.RDNs) >= len(other.RDNs) {
return false
}
// Take the last `len(d.RDNs)` RDNs from the other DN to compare against
otherRDNs := other.RDNs[len(other.RDNs)-len(d.RDNs):]
for i := range d.RDNs {
if !d.RDNs[i].Equal(otherRDNs[i]) {
return false
}
}
return true
}
// Equal returns true if the RelativeDNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
// Relative distinguished names are the same if and only if they have the same number of AttributeTypeAndValues
// and each attribute of the first RDN is the same as the attribute of the second RDN with the same attribute type.
// The order of attributes is not significant.
// Case of attribute types is not significant.
func (r *RelativeDN) Equal(other *RelativeDN) bool {
if len(r.Attributes) != len(other.Attributes) {
return false
}
return r.hasAllAttributes(other.Attributes) && other.hasAllAttributes(r.Attributes)
}
func (r *RelativeDN) hasAllAttributes(attrs []*AttributeTypeAndValue) bool {
for _, attr := range attrs {
found := false
for _, myattr := range r.Attributes {
if myattr.Equal(attr) {
found = true
break
}
}
if !found {
return false
}
}
return true
}
// Equal returns true if the AttributeTypeAndValue is equivalent to the specified AttributeTypeAndValue
// Case of the attribute type is not significant
func (a *AttributeTypeAndValue) Equal(other *AttributeTypeAndValue) bool {
return strings.EqualFold(a.Type, other.Type) && a.Value == other.Value
}
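
A sketch of the DN matching rules above: attribute types compare case-insensitively while attribute values compare byte-for-byte, and `AncestorOf` only looks at the trailing RDNs.

```go
package ldapexample

import (
	"fmt"

	ldap "gopkg.in/ldap.v3"
)

// compareDNs parses two DNs and applies Equal and AncestorOf to them.
func compareDNs() error {
	parent, err := ldap.ParseDN("ou=widgets,o=acme.com")
	if err != nil {
		return err
	}
	child, err := ldap.ParseDN("OU=sprockets,OU=widgets,O=acme.com")
	if err != nil {
		return err
	}
	fmt.Println(parent.Equal(child))      // false: different number of RDNs
	fmt.Println(parent.AncestorOf(child)) // true: child ends with all of parent's RDNs
	return nil
}
```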

4
vendor/gopkg.in/ldap.v3/doc.go generated vendored Normal file

@@ -0,0 +1,4 @@
/*
Package ldap provides basic LDAP v3 functionality.
*/
package ldap

236
vendor/gopkg.in/ldap.v3/error.go generated vendored Normal file

@@ -0,0 +1,236 @@
package ldap
import (
"fmt"
ber "gopkg.in/asn1-ber.v1"
)
// LDAP Result Codes
const (
LDAPResultSuccess = 0
LDAPResultOperationsError = 1
LDAPResultProtocolError = 2
LDAPResultTimeLimitExceeded = 3
LDAPResultSizeLimitExceeded = 4
LDAPResultCompareFalse = 5
LDAPResultCompareTrue = 6
LDAPResultAuthMethodNotSupported = 7
LDAPResultStrongAuthRequired = 8
LDAPResultReferral = 10
LDAPResultAdminLimitExceeded = 11
LDAPResultUnavailableCriticalExtension = 12
LDAPResultConfidentialityRequired = 13
LDAPResultSaslBindInProgress = 14
LDAPResultNoSuchAttribute = 16
LDAPResultUndefinedAttributeType = 17
LDAPResultInappropriateMatching = 18
LDAPResultConstraintViolation = 19
LDAPResultAttributeOrValueExists = 20
LDAPResultInvalidAttributeSyntax = 21
LDAPResultNoSuchObject = 32
LDAPResultAliasProblem = 33
LDAPResultInvalidDNSyntax = 34
LDAPResultIsLeaf = 35
LDAPResultAliasDereferencingProblem = 36
LDAPResultInappropriateAuthentication = 48
LDAPResultInvalidCredentials = 49
LDAPResultInsufficientAccessRights = 50
LDAPResultBusy = 51
LDAPResultUnavailable = 52
LDAPResultUnwillingToPerform = 53
LDAPResultLoopDetect = 54
LDAPResultSortControlMissing = 60
LDAPResultOffsetRangeError = 61
LDAPResultNamingViolation = 64
LDAPResultObjectClassViolation = 65
LDAPResultNotAllowedOnNonLeaf = 66
LDAPResultNotAllowedOnRDN = 67
LDAPResultEntryAlreadyExists = 68
LDAPResultObjectClassModsProhibited = 69
LDAPResultResultsTooLarge = 70
LDAPResultAffectsMultipleDSAs = 71
LDAPResultVirtualListViewErrorOrControlError = 76
LDAPResultOther = 80
LDAPResultServerDown = 81
LDAPResultLocalError = 82
LDAPResultEncodingError = 83
LDAPResultDecodingError = 84
LDAPResultTimeout = 85
LDAPResultAuthUnknown = 86
LDAPResultFilterError = 87
LDAPResultUserCanceled = 88
LDAPResultParamError = 89
LDAPResultNoMemory = 90
LDAPResultConnectError = 91
LDAPResultNotSupported = 92
LDAPResultControlNotFound = 93
LDAPResultNoResultsReturned = 94
LDAPResultMoreResultsToReturn = 95
LDAPResultClientLoop = 96
LDAPResultReferralLimitExceeded = 97
LDAPResultInvalidResponse = 100
LDAPResultAmbiguousResponse = 101
LDAPResultTLSNotSupported = 112
LDAPResultIntermediateResponse = 113
LDAPResultUnknownType = 114
LDAPResultCanceled = 118
LDAPResultNoSuchOperation = 119
LDAPResultTooLate = 120
LDAPResultCannotCancel = 121
LDAPResultAssertionFailed = 122
LDAPResultAuthorizationDenied = 123
LDAPResultSyncRefreshRequired = 4096
ErrorNetwork = 200
ErrorFilterCompile = 201
ErrorFilterDecompile = 202
ErrorDebugging = 203
ErrorUnexpectedMessage = 204
ErrorUnexpectedResponse = 205
ErrorEmptyPassword = 206
)
// LDAPResultCodeMap contains string descriptions for LDAP error codes
var LDAPResultCodeMap = map[uint16]string{
LDAPResultSuccess: "Success",
LDAPResultOperationsError: "Operations Error",
LDAPResultProtocolError: "Protocol Error",
LDAPResultTimeLimitExceeded: "Time Limit Exceeded",
LDAPResultSizeLimitExceeded: "Size Limit Exceeded",
LDAPResultCompareFalse: "Compare False",
LDAPResultCompareTrue: "Compare True",
LDAPResultAuthMethodNotSupported: "Auth Method Not Supported",
LDAPResultStrongAuthRequired: "Strong Auth Required",
LDAPResultReferral: "Referral",
LDAPResultAdminLimitExceeded: "Admin Limit Exceeded",
LDAPResultUnavailableCriticalExtension: "Unavailable Critical Extension",
LDAPResultConfidentialityRequired: "Confidentiality Required",
LDAPResultSaslBindInProgress: "Sasl Bind In Progress",
LDAPResultNoSuchAttribute: "No Such Attribute",
LDAPResultUndefinedAttributeType: "Undefined Attribute Type",
LDAPResultInappropriateMatching: "Inappropriate Matching",
LDAPResultConstraintViolation: "Constraint Violation",
LDAPResultAttributeOrValueExists: "Attribute Or Value Exists",
LDAPResultInvalidAttributeSyntax: "Invalid Attribute Syntax",
LDAPResultNoSuchObject: "No Such Object",
LDAPResultAliasProblem: "Alias Problem",
LDAPResultInvalidDNSyntax: "Invalid DN Syntax",
LDAPResultIsLeaf: "Is Leaf",
LDAPResultAliasDereferencingProblem: "Alias Dereferencing Problem",
LDAPResultInappropriateAuthentication: "Inappropriate Authentication",
LDAPResultInvalidCredentials: "Invalid Credentials",
LDAPResultInsufficientAccessRights: "Insufficient Access Rights",
LDAPResultBusy: "Busy",
LDAPResultUnavailable: "Unavailable",
LDAPResultUnwillingToPerform: "Unwilling To Perform",
LDAPResultLoopDetect: "Loop Detect",
LDAPResultSortControlMissing: "Sort Control Missing",
LDAPResultOffsetRangeError: "Result Offset Range Error",
LDAPResultNamingViolation: "Naming Violation",
LDAPResultObjectClassViolation: "Object Class Violation",
LDAPResultResultsTooLarge: "Results Too Large",
LDAPResultNotAllowedOnNonLeaf: "Not Allowed On Non Leaf",
LDAPResultNotAllowedOnRDN: "Not Allowed On RDN",
LDAPResultEntryAlreadyExists: "Entry Already Exists",
LDAPResultObjectClassModsProhibited: "Object Class Mods Prohibited",
LDAPResultAffectsMultipleDSAs: "Affects Multiple DSAs",
LDAPResultVirtualListViewErrorOrControlError: "Failed because of a problem related to the virtual list view",
LDAPResultOther: "Other",
LDAPResultServerDown: "Cannot establish a connection",
LDAPResultLocalError: "An error occurred",
LDAPResultEncodingError: "LDAP encountered an error while encoding",
LDAPResultDecodingError: "LDAP encountered an error while decoding",
LDAPResultTimeout: "LDAP timeout while waiting for a response from the server",
LDAPResultAuthUnknown: "The auth method requested in a bind request is unknown",
LDAPResultFilterError: "An error occurred while encoding the given search filter",
LDAPResultUserCanceled: "The user canceled the operation",
LDAPResultParamError: "An invalid parameter was specified",
LDAPResultNoMemory: "Out of memory error",
LDAPResultConnectError: "A connection to the server could not be established",
LDAPResultNotSupported: "An attempt has been made to use a feature not supported LDAP",
LDAPResultControlNotFound: "The controls required to perform the requested operation were not found",
LDAPResultNoResultsReturned: "No results were returned from the server",
LDAPResultMoreResultsToReturn: "There are more results in the chain of results",
LDAPResultClientLoop: "A loop has been detected. For example when following referrals",
LDAPResultReferralLimitExceeded: "The referral hop limit has been exceeded",
LDAPResultCanceled: "Operation was canceled",
LDAPResultNoSuchOperation: "Server has no knowledge of the operation requested for cancellation",
LDAPResultTooLate: "Too late to cancel the outstanding operation",
LDAPResultCannotCancel: "The identified operation does not support cancellation or the cancel operation cannot be performed",
LDAPResultAssertionFailed: "An assertion control given in the LDAP operation evaluated to false causing the operation to not be performed",
LDAPResultSyncRefreshRequired: "Refresh Required",
LDAPResultInvalidResponse: "Invalid Response",
LDAPResultAmbiguousResponse: "Ambiguous Response",
LDAPResultTLSNotSupported: "Tls Not Supported",
LDAPResultIntermediateResponse: "Intermediate Response",
LDAPResultUnknownType: "Unknown Type",
LDAPResultAuthorizationDenied: "Authorization Denied",
ErrorNetwork: "Network Error",
ErrorFilterCompile: "Filter Compile Error",
ErrorFilterDecompile: "Filter Decompile Error",
ErrorDebugging: "Debugging Error",
ErrorUnexpectedMessage: "Unexpected Message",
ErrorUnexpectedResponse: "Unexpected Response",
ErrorEmptyPassword: "Empty password not allowed by the client",
}
// Error holds LDAP error information
type Error struct {
// Err is the underlying error
Err error
// ResultCode is the LDAP error code
ResultCode uint16
// MatchedDN is the matchedDN returned if any
MatchedDN string
}
func (e *Error) Error() string {
return fmt.Sprintf("LDAP Result Code %d %q: %s", e.ResultCode, LDAPResultCodeMap[e.ResultCode], e.Err.Error())
}
// GetLDAPError creates an Error out of a BER packet representing a LDAPResult
// The return is an error object. It can be casted to a Error structure.
// This function returns nil if resultCode in the LDAPResult sequence is success(0).
func GetLDAPError(packet *ber.Packet) error {
if packet == nil {
return &Error{ResultCode: ErrorUnexpectedResponse, Err: fmt.Errorf("Empty packet")}
}
if len(packet.Children) >= 2 {
response := packet.Children[1]
if response == nil {
return &Error{ResultCode: ErrorUnexpectedResponse, Err: fmt.Errorf("Empty response in packet")}
}
if response.ClassType == ber.ClassApplication && response.TagType == ber.TypeConstructed && len(response.Children) >= 3 {
resultCode := uint16(response.Children[0].Value.(int64))
if resultCode == 0 { // No error
return nil
}
return &Error{ResultCode: resultCode, MatchedDN: response.Children[1].Value.(string),
Err: fmt.Errorf("%s", response.Children[2].Value.(string))}
}
}
return &Error{ResultCode: ErrorNetwork, Err: fmt.Errorf("Invalid packet format")}
}
// NewError creates an LDAP error with the given code and underlying error
func NewError(resultCode uint16, err error) error {
return &Error{ResultCode: resultCode, Err: err}
}
// IsErrorWithCode returns true if the given error is an LDAP error with the given result code
func IsErrorWithCode(err error, desiredResultCode uint16) bool {
if err == nil {
return false
}
serverError, ok := err.(*Error)
if !ok {
return false
}
return serverError.ResultCode == desiredResultCode
}
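
A sketch of checking result codes with `IsErrorWithCode`, assuming the library's `Search` method from search.go (not part of this hunk); a missing base DN then surfaces as result code 32 rather than as a generic failure.

```go
package ldapexample

import ldap "gopkg.in/ldap.v3"

// entryExists treats LDAPResultNoSuchObject as "not found" instead of an error.
func entryExists(conn *ldap.Conn, req *ldap.SearchRequest) (bool, error) {
	if _, err := conn.Search(req); err != nil {
		if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) {
			return false, nil
		}
		return false, err
	}
	return true, nil
}
```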

465
vendor/gopkg.in/ldap.v3/filter.go generated vendored Normal file

@@ -0,0 +1,465 @@
package ldap
import (
"bytes"
hexpac "encoding/hex"
"errors"
"fmt"
"strings"
"unicode/utf8"
"gopkg.in/asn1-ber.v1"
)
// Filter choices
const (
FilterAnd = 0
FilterOr = 1
FilterNot = 2
FilterEqualityMatch = 3
FilterSubstrings = 4
FilterGreaterOrEqual = 5
FilterLessOrEqual = 6
FilterPresent = 7
FilterApproxMatch = 8
FilterExtensibleMatch = 9
)
// FilterMap contains human readable descriptions of Filter choices
var FilterMap = map[uint64]string{
FilterAnd: "And",
FilterOr: "Or",
FilterNot: "Not",
FilterEqualityMatch: "Equality Match",
FilterSubstrings: "Substrings",
FilterGreaterOrEqual: "Greater Or Equal",
FilterLessOrEqual: "Less Or Equal",
FilterPresent: "Present",
FilterApproxMatch: "Approx Match",
FilterExtensibleMatch: "Extensible Match",
}
// SubstringFilter options
const (
FilterSubstringsInitial = 0
FilterSubstringsAny = 1
FilterSubstringsFinal = 2
)
// FilterSubstringsMap contains human readable descriptions of SubstringFilter choices
var FilterSubstringsMap = map[uint64]string{
FilterSubstringsInitial: "Substrings Initial",
FilterSubstringsAny: "Substrings Any",
FilterSubstringsFinal: "Substrings Final",
}
// MatchingRuleAssertion choices
const (
MatchingRuleAssertionMatchingRule = 1
MatchingRuleAssertionType = 2
MatchingRuleAssertionMatchValue = 3
MatchingRuleAssertionDNAttributes = 4
)
// MatchingRuleAssertionMap contains human readable descriptions of MatchingRuleAssertion choices
var MatchingRuleAssertionMap = map[uint64]string{
MatchingRuleAssertionMatchingRule: "Matching Rule Assertion Matching Rule",
MatchingRuleAssertionType: "Matching Rule Assertion Type",
MatchingRuleAssertionMatchValue: "Matching Rule Assertion Match Value",
MatchingRuleAssertionDNAttributes: "Matching Rule Assertion DN Attributes",
}
// CompileFilter converts a string representation of a filter into a BER-encoded packet
func CompileFilter(filter string) (*ber.Packet, error) {
if len(filter) == 0 || filter[0] != '(' {
return nil, NewError(ErrorFilterCompile, errors.New("ldap: filter does not start with an '('"))
}
packet, pos, err := compileFilter(filter, 1)
if err != nil {
return nil, err
}
switch {
case pos > len(filter):
return nil, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
case pos < len(filter):
return nil, NewError(ErrorFilterCompile, errors.New("ldap: finished compiling filter with extra at end: "+fmt.Sprint(filter[pos:])))
}
return packet, nil
}
// DecompileFilter converts a packet representation of a filter into a string representation
func DecompileFilter(packet *ber.Packet) (ret string, err error) {
defer func() {
if r := recover(); r != nil {
err = NewError(ErrorFilterDecompile, errors.New("ldap: error decompiling filter"))
}
}()
ret = "("
err = nil
childStr := ""
switch packet.Tag {
case FilterAnd:
ret += "&"
for _, child := range packet.Children {
childStr, err = DecompileFilter(child)
if err != nil {
return
}
ret += childStr
}
case FilterOr:
ret += "|"
for _, child := range packet.Children {
childStr, err = DecompileFilter(child)
if err != nil {
return
}
ret += childStr
}
case FilterNot:
ret += "!"
childStr, err = DecompileFilter(packet.Children[0])
if err != nil {
return
}
ret += childStr
case FilterSubstrings:
ret += ber.DecodeString(packet.Children[0].Data.Bytes())
ret += "="
for i, child := range packet.Children[1].Children {
if i == 0 && child.Tag != FilterSubstringsInitial {
ret += "*"
}
ret += EscapeFilter(ber.DecodeString(child.Data.Bytes()))
if child.Tag != FilterSubstringsFinal {
ret += "*"
}
}
case FilterEqualityMatch:
ret += ber.DecodeString(packet.Children[0].Data.Bytes())
ret += "="
ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
case FilterGreaterOrEqual:
ret += ber.DecodeString(packet.Children[0].Data.Bytes())
ret += ">="
ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
case FilterLessOrEqual:
ret += ber.DecodeString(packet.Children[0].Data.Bytes())
ret += "<="
ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
case FilterPresent:
ret += ber.DecodeString(packet.Data.Bytes())
ret += "=*"
case FilterApproxMatch:
ret += ber.DecodeString(packet.Children[0].Data.Bytes())
ret += "~="
ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
case FilterExtensibleMatch:
attr := ""
dnAttributes := false
matchingRule := ""
value := ""
for _, child := range packet.Children {
switch child.Tag {
case MatchingRuleAssertionMatchingRule:
matchingRule = ber.DecodeString(child.Data.Bytes())
case MatchingRuleAssertionType:
attr = ber.DecodeString(child.Data.Bytes())
case MatchingRuleAssertionMatchValue:
value = ber.DecodeString(child.Data.Bytes())
case MatchingRuleAssertionDNAttributes:
dnAttributes = child.Value.(bool)
}
}
if len(attr) > 0 {
ret += attr
}
if dnAttributes {
ret += ":dn"
}
if len(matchingRule) > 0 {
ret += ":"
ret += matchingRule
}
ret += ":="
ret += EscapeFilter(value)
}
ret += ")"
return
}
func compileFilterSet(filter string, pos int, parent *ber.Packet) (int, error) {
for pos < len(filter) && filter[pos] == '(' {
child, newPos, err := compileFilter(filter, pos+1)
if err != nil {
return pos, err
}
pos = newPos
parent.AppendChild(child)
}
if pos == len(filter) {
return pos, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
}
return pos + 1, nil
}
func compileFilter(filter string, pos int) (*ber.Packet, int, error) {
var (
packet *ber.Packet
err error
)
defer func() {
if r := recover(); r != nil {
err = NewError(ErrorFilterCompile, errors.New("ldap: error compiling filter"))
}
}()
newPos := pos
currentRune, currentWidth := utf8.DecodeRuneInString(filter[newPos:])
switch currentRune {
case utf8.RuneError:
return nil, 0, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos))
case '(':
packet, newPos, err = compileFilter(filter, pos+currentWidth)
newPos++
return packet, newPos, err
case '&':
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterAnd, nil, FilterMap[FilterAnd])
newPos, err = compileFilterSet(filter, pos+currentWidth, packet)
return packet, newPos, err
case '|':
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterOr, nil, FilterMap[FilterOr])
newPos, err = compileFilterSet(filter, pos+currentWidth, packet)
return packet, newPos, err
case '!':
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterNot, nil, FilterMap[FilterNot])
var child *ber.Packet
child, newPos, err = compileFilter(filter, pos+currentWidth)
packet.AppendChild(child)
return packet, newPos, err
default:
const (
stateReadingAttr = 0
stateReadingExtensibleMatchingRule = 1
stateReadingCondition = 2
)
state := stateReadingAttr
attribute := ""
extensibleDNAttributes := false
extensibleMatchingRule := ""
condition := ""
for newPos < len(filter) {
remainingFilter := filter[newPos:]
currentRune, currentWidth = utf8.DecodeRuneInString(remainingFilter)
if currentRune == ')' {
break
}
if currentRune == utf8.RuneError {
return packet, newPos, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos))
}
switch state {
case stateReadingAttr:
switch {
// Extensible rule, with only DN-matching
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:="):
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
extensibleDNAttributes = true
state = stateReadingCondition
newPos += 5
// Extensible rule, with DN-matching and a matching OID
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:"):
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
extensibleDNAttributes = true
state = stateReadingExtensibleMatchingRule
newPos += 4
// Extensible rule, with attr only
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="):
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
state = stateReadingCondition
newPos += 2
// Extensible rule, with no DN attribute matching
case currentRune == ':':
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
state = stateReadingExtensibleMatchingRule
newPos++
// Equality condition
case currentRune == '=':
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterEqualityMatch, nil, FilterMap[FilterEqualityMatch])
state = stateReadingCondition
newPos++
// Greater-than or equal
case currentRune == '>' && strings.HasPrefix(remainingFilter, ">="):
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterGreaterOrEqual, nil, FilterMap[FilterGreaterOrEqual])
state = stateReadingCondition
newPos += 2
// Less-than or equal
case currentRune == '<' && strings.HasPrefix(remainingFilter, "<="):
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterLessOrEqual, nil, FilterMap[FilterLessOrEqual])
state = stateReadingCondition
newPos += 2
// Approx
case currentRune == '~' && strings.HasPrefix(remainingFilter, "~="):
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterApproxMatch, nil, FilterMap[FilterApproxMatch])
state = stateReadingCondition
newPos += 2
// Still reading the attribute name
default:
attribute += fmt.Sprintf("%c", currentRune)
newPos += currentWidth
}
case stateReadingExtensibleMatchingRule:
switch {
// Matching rule OID is done
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="):
state = stateReadingCondition
newPos += 2
// Still reading the matching rule oid
default:
extensibleMatchingRule += fmt.Sprintf("%c", currentRune)
newPos += currentWidth
}
case stateReadingCondition:
// append to the condition
condition += fmt.Sprintf("%c", currentRune)
newPos += currentWidth
}
}
if newPos == len(filter) {
err = NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
return packet, newPos, err
}
if packet == nil {
err = NewError(ErrorFilterCompile, errors.New("ldap: error parsing filter"))
return packet, newPos, err
}
switch {
case packet.Tag == FilterExtensibleMatch:
// MatchingRuleAssertion ::= SEQUENCE {
// matchingRule [1] MatchingRuleID OPTIONAL,
// type [2] AttributeDescription OPTIONAL,
// matchValue [3] AssertionValue,
// dnAttributes [4] BOOLEAN DEFAULT FALSE
// }
// Include the matching rule oid, if specified
if len(extensibleMatchingRule) > 0 {
packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchingRule, extensibleMatchingRule, MatchingRuleAssertionMap[MatchingRuleAssertionMatchingRule]))
}
// Include the attribute, if specified
if len(attribute) > 0 {
packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionType, attribute, MatchingRuleAssertionMap[MatchingRuleAssertionType]))
}
// Add the value (only required child)
encodedString, encodeErr := escapedStringToEncodedBytes(condition)
if encodeErr != nil {
return packet, newPos, encodeErr
}
packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchValue, encodedString, MatchingRuleAssertionMap[MatchingRuleAssertionMatchValue]))
// Defaults to false, so only include in the sequence if true
if extensibleDNAttributes {
packet.AppendChild(ber.NewBoolean(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionDNAttributes, extensibleDNAttributes, MatchingRuleAssertionMap[MatchingRuleAssertionDNAttributes]))
}
case packet.Tag == FilterEqualityMatch && condition == "*":
packet = ber.NewString(ber.ClassContext, ber.TypePrimitive, FilterPresent, attribute, FilterMap[FilterPresent])
case packet.Tag == FilterEqualityMatch && strings.Contains(condition, "*"):
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute"))
packet.Tag = FilterSubstrings
packet.Description = FilterMap[uint64(packet.Tag)]
seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Substrings")
parts := strings.Split(condition, "*")
for i, part := range parts {
if part == "" {
continue
}
var tag ber.Tag
switch i {
case 0:
tag = FilterSubstringsInitial
case len(parts) - 1:
tag = FilterSubstringsFinal
default:
tag = FilterSubstringsAny
}
encodedString, encodeErr := escapedStringToEncodedBytes(part)
if encodeErr != nil {
return packet, newPos, encodeErr
}
seq.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, tag, encodedString, FilterSubstringsMap[uint64(tag)]))
}
packet.AppendChild(seq)
default:
encodedString, encodeErr := escapedStringToEncodedBytes(condition)
if encodeErr != nil {
return packet, newPos, encodeErr
}
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute"))
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, encodedString, "Condition"))
}
newPos += currentWidth
return packet, newPos, err
}
}
// Convert from "ABC\xx\xx\xx" form to literal bytes for transport
func escapedStringToEncodedBytes(escapedString string) (string, error) {
var buffer bytes.Buffer
i := 0
for i < len(escapedString) {
currentRune, currentWidth := utf8.DecodeRuneInString(escapedString[i:])
if currentRune == utf8.RuneError {
return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", i))
}
// Check for escaped hex characters and convert them to their literal value for transport.
if currentRune == '\\' {
// http://tools.ietf.org/search/rfc4515
// \ (%x5C) is not a valid character unless it is followed by two HEX characters due to not
// being a member of UTF1SUBSET.
if i+2 > len(escapedString) {
return "", NewError(ErrorFilterCompile, errors.New("ldap: missing characters for escape in filter"))
}
escByte, decodeErr := hexpac.DecodeString(escapedString[i+1 : i+3])
if decodeErr != nil {
return "", NewError(ErrorFilterCompile, errors.New("ldap: invalid characters for escape in filter"))
}
buffer.WriteByte(escByte[0])
i += 2 // +1 from end of loop, so 3 total for \xx.
} else {
buffer.WriteRune(currentRune)
}
i += currentWidth
}
return buffer.String(), nil
}
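
A round-trip sketch for the filter code above: `CompileFilter` turns the string form into the BER packet sent on the wire, and `DecompileFilter` renders it back, re-escaping substring parts.

```go
package ldapexample

import (
	"fmt"

	ldap "gopkg.in/ldap.v3"
)

// roundTripFilter compiles and then decompiles a filter with a substring match.
func roundTripFilter() error {
	pkt, err := ldap.CompileFilter("(&(objectClass=person)(cn=ab*ba))")
	if err != nil {
		return err
	}
	s, err := ldap.DecompileFilter(pkt)
	if err != nil {
		return err
	}
	fmt.Println(s) // (&(objectClass=person)(cn=ab*ba))
	return nil
}
```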

3
vendor/gopkg.in/ldap.v3/go.mod generated vendored Normal file

@@ -0,0 +1,3 @@
module gopkg.in/ldap.v3
require gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d

2
vendor/gopkg.in/ldap.v3/go.sum generated vendored Normal file

@@ -0,0 +1,2 @@
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=

340
vendor/gopkg.in/ldap.v3/ldap.go generated vendored Normal file

@@ -0,0 +1,340 @@
package ldap
import (
"fmt"
"io/ioutil"
"os"
ber "gopkg.in/asn1-ber.v1"
)
// LDAP Application Codes
const (
ApplicationBindRequest = 0
ApplicationBindResponse = 1
ApplicationUnbindRequest = 2
ApplicationSearchRequest = 3
ApplicationSearchResultEntry = 4
ApplicationSearchResultDone = 5
ApplicationModifyRequest = 6
ApplicationModifyResponse = 7
ApplicationAddRequest = 8
ApplicationAddResponse = 9
ApplicationDelRequest = 10
ApplicationDelResponse = 11
ApplicationModifyDNRequest = 12
ApplicationModifyDNResponse = 13
ApplicationCompareRequest = 14
ApplicationCompareResponse = 15
ApplicationAbandonRequest = 16
ApplicationSearchResultReference = 19
ApplicationExtendedRequest = 23
ApplicationExtendedResponse = 24
)
// ApplicationMap contains human readable descriptions of LDAP Application Codes
var ApplicationMap = map[uint8]string{
ApplicationBindRequest: "Bind Request",
ApplicationBindResponse: "Bind Response",
ApplicationUnbindRequest: "Unbind Request",
ApplicationSearchRequest: "Search Request",
ApplicationSearchResultEntry: "Search Result Entry",
ApplicationSearchResultDone: "Search Result Done",
ApplicationModifyRequest: "Modify Request",
ApplicationModifyResponse: "Modify Response",
ApplicationAddRequest: "Add Request",
ApplicationAddResponse: "Add Response",
ApplicationDelRequest: "Del Request",
ApplicationDelResponse: "Del Response",
ApplicationModifyDNRequest: "Modify DN Request",
ApplicationModifyDNResponse: "Modify DN Response",
ApplicationCompareRequest: "Compare Request",
ApplicationCompareResponse: "Compare Response",
ApplicationAbandonRequest: "Abandon Request",
ApplicationSearchResultReference: "Search Result Reference",
ApplicationExtendedRequest: "Extended Request",
ApplicationExtendedResponse: "Extended Response",
}
// Ldap Behera Password Policy Draft 10 (https://tools.ietf.org/html/draft-behera-ldap-password-policy-10)
const (
BeheraPasswordExpired = 0
BeheraAccountLocked = 1
BeheraChangeAfterReset = 2
BeheraPasswordModNotAllowed = 3
BeheraMustSupplyOldPassword = 4
BeheraInsufficientPasswordQuality = 5
BeheraPasswordTooShort = 6
BeheraPasswordTooYoung = 7
BeheraPasswordInHistory = 8
)
// BeheraPasswordPolicyErrorMap contains human readable descriptions of Behera Password Policy error codes
var BeheraPasswordPolicyErrorMap = map[int8]string{
BeheraPasswordExpired: "Password expired",
BeheraAccountLocked: "Account locked",
BeheraChangeAfterReset: "Password must be changed",
BeheraPasswordModNotAllowed: "Policy prevents password modification",
BeheraMustSupplyOldPassword: "Policy requires old password in order to change password",
BeheraInsufficientPasswordQuality: "Password fails quality checks",
BeheraPasswordTooShort: "Password is too short for policy",
BeheraPasswordTooYoung: "Password has been changed too recently",
BeheraPasswordInHistory: "New password is in list of old passwords",
}
// Adds descriptions to an LDAP Response packet for debugging
func addLDAPDescriptions(packet *ber.Packet) (err error) {
defer func() {
if r := recover(); r != nil {
err = NewError(ErrorDebugging, fmt.Errorf("ldap: cannot process packet to add descriptions: %s", r))
}
}()
packet.Description = "LDAP Response"
packet.Children[0].Description = "Message ID"
application := uint8(packet.Children[1].Tag)
packet.Children[1].Description = ApplicationMap[application]
switch application {
case ApplicationBindRequest:
err = addRequestDescriptions(packet)
case ApplicationBindResponse:
err = addDefaultLDAPResponseDescriptions(packet)
case ApplicationUnbindRequest:
err = addRequestDescriptions(packet)
case ApplicationSearchRequest:
err = addRequestDescriptions(packet)
case ApplicationSearchResultEntry:
packet.Children[1].Children[0].Description = "Object Name"
packet.Children[1].Children[1].Description = "Attributes"
for _, child := range packet.Children[1].Children[1].Children {
child.Description = "Attribute"
child.Children[0].Description = "Attribute Name"
child.Children[1].Description = "Attribute Values"
for _, grandchild := range child.Children[1].Children {
grandchild.Description = "Attribute Value"
}
}
if len(packet.Children) == 3 {
err = addControlDescriptions(packet.Children[2])
}
case ApplicationSearchResultDone:
err = addDefaultLDAPResponseDescriptions(packet)
case ApplicationModifyRequest:
err = addRequestDescriptions(packet)
case ApplicationModifyResponse:
case ApplicationAddRequest:
err = addRequestDescriptions(packet)
case ApplicationAddResponse:
case ApplicationDelRequest:
err = addRequestDescriptions(packet)
case ApplicationDelResponse:
case ApplicationModifyDNRequest:
err = addRequestDescriptions(packet)
case ApplicationModifyDNResponse:
case ApplicationCompareRequest:
err = addRequestDescriptions(packet)
case ApplicationCompareResponse:
case ApplicationAbandonRequest:
err = addRequestDescriptions(packet)
case ApplicationSearchResultReference:
case ApplicationExtendedRequest:
err = addRequestDescriptions(packet)
case ApplicationExtendedResponse:
}
return err
}
func addControlDescriptions(packet *ber.Packet) error {
packet.Description = "Controls"
for _, child := range packet.Children {
var value *ber.Packet
controlType := ""
child.Description = "Control"
switch len(child.Children) {
case 0:
// at least one child is required for control type
return fmt.Errorf("at least one child is required for control type")
case 1:
// just type, no criticality or value
controlType = child.Children[0].Value.(string)
child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
case 2:
controlType = child.Children[0].Value.(string)
child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
// Children[1] could be criticality or value (both are optional)
// duck-type on whether this is a boolean
if _, ok := child.Children[1].Value.(bool); ok {
child.Children[1].Description = "Criticality"
} else {
child.Children[1].Description = "Control Value"
value = child.Children[1]
}
case 3:
// criticality and value present
controlType = child.Children[0].Value.(string)
child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
child.Children[1].Description = "Criticality"
child.Children[2].Description = "Control Value"
value = child.Children[2]
default:
// more than 3 children is invalid
return fmt.Errorf("more than 3 children for control packet found")
}
if value == nil {
continue
}
switch controlType {
case ControlTypePaging:
value.Description += " (Paging)"
if value.Value != nil {
valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
if err != nil {
return fmt.Errorf("failed to decode data bytes: %s", err)
}
value.Data.Truncate(0)
value.Value = nil
valueChildren.Children[1].Value = valueChildren.Children[1].Data.Bytes()
value.AppendChild(valueChildren)
}
value.Children[0].Description = "Real Search Control Value"
value.Children[0].Children[0].Description = "Paging Size"
value.Children[0].Children[1].Description = "Cookie"
case ControlTypeBeheraPasswordPolicy:
value.Description += " (Password Policy - Behera Draft)"
if value.Value != nil {
valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
if err != nil {
return fmt.Errorf("failed to decode data bytes: %s", err)
}
value.Data.Truncate(0)
value.Value = nil
value.AppendChild(valueChildren)
}
sequence := value.Children[0]
for _, child := range sequence.Children {
if child.Tag == 0 {
//Warning
warningPacket := child.Children[0]
packet, err := ber.DecodePacketErr(warningPacket.Data.Bytes())
if err != nil {
return fmt.Errorf("failed to decode data bytes: %s", err)
}
val, ok := packet.Value.(int64)
if ok {
if warningPacket.Tag == 0 {
//timeBeforeExpiration
value.Description += " (TimeBeforeExpiration)"
warningPacket.Value = val
} else if warningPacket.Tag == 1 {
//graceAuthNsRemaining
value.Description += " (GraceAuthNsRemaining)"
warningPacket.Value = val
}
}
} else if child.Tag == 1 {
// Error
packet, err := ber.DecodePacketErr(child.Data.Bytes())
if err != nil {
return fmt.Errorf("failed to decode data bytes: %s", err)
}
val, ok := packet.Value.(int8)
if !ok {
val = -1
}
child.Description = "Error"
child.Value = val
}
}
}
}
return nil
}
func addRequestDescriptions(packet *ber.Packet) error {
packet.Description = "LDAP Request"
packet.Children[0].Description = "Message ID"
packet.Children[1].Description = ApplicationMap[uint8(packet.Children[1].Tag)]
if len(packet.Children) == 3 {
return addControlDescriptions(packet.Children[2])
}
return nil
}
func addDefaultLDAPResponseDescriptions(packet *ber.Packet) error {
err := GetLDAPError(packet)
if err == nil {
return nil
}
packet.Children[1].Children[0].Description = "Result Code (" + LDAPResultCodeMap[err.(*Error).ResultCode] + ")"
packet.Children[1].Children[1].Description = "Matched DN (" + err.(*Error).MatchedDN + ")"
packet.Children[1].Children[2].Description = "Error Message"
if len(packet.Children[1].Children) > 3 {
packet.Children[1].Children[3].Description = "Referral"
}
if len(packet.Children) == 3 {
return addControlDescriptions(packet.Children[2])
}
return nil
}
// DebugBinaryFile reads and prints packets from the given filename
func DebugBinaryFile(fileName string) error {
file, err := ioutil.ReadFile(fileName)
if err != nil {
return NewError(ErrorDebugging, err)
}
ber.PrintBytes(os.Stdout, file, "")
packet, err := ber.DecodePacketErr(file)
if err != nil {
return fmt.Errorf("failed to decode packet: %s", err)
}
if err := addLDAPDescriptions(packet); err != nil {
return err
}
ber.PrintPacket(packet)
return nil
}
var hex = "0123456789abcdef"
func mustEscape(c byte) bool {
return c > 0x7f || c == '(' || c == ')' || c == '\\' || c == '*' || c == 0
}
// EscapeFilter escapes the special characters in the set `()*\`, as well as
// any byte outside the range 0 < c < 0x80, in the provided LDAP filter string,
// as defined in RFC 4515.
func EscapeFilter(filter string) string {
escape := 0
for i := 0; i < len(filter); i++ {
if mustEscape(filter[i]) {
escape++
}
}
if escape == 0 {
return filter
}
buf := make([]byte, len(filter)+escape*2)
for i, j := 0, 0; i < len(filter); i++ {
c := filter[i]
if mustEscape(c) {
buf[j+0] = '\\'
buf[j+1] = hex[c>>4]
buf[j+2] = hex[c&0xf]
j += 3
} else {
buf[j] = c
j++
}
}
return string(buf)
}
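A hedged usage sketch of `EscapeFilter` (not part of the vendored file; the attribute name and input value are placeholders), showing how untrusted input can be embedded in a search filter:

```go
package main

import (
	"fmt"

	ldap "gopkg.in/ldap.v3"
)

func main() {
	// Untrusted input that would otherwise break out of the filter expression.
	userInput := `*)(objectClass=*`

	// Special characters are replaced with \XX hex escapes, so the value can be
	// embedded safely in a filter string.
	filter := fmt.Sprintf("(cn=%s)", ldap.EscapeFilter(userInput))
	fmt.Println(filter)
}
```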

85
vendor/gopkg.in/ldap.v3/moddn.go generated vendored Normal file
View File

@ -0,0 +1,85 @@
// Package ldap - moddn.go contains ModifyDN functionality
//
// https://tools.ietf.org/html/rfc4511
// ModifyDNRequest ::= [APPLICATION 12] SEQUENCE {
// entry LDAPDN,
// newrdn RelativeLDAPDN,
// deleteoldrdn BOOLEAN,
// newSuperior [0] LDAPDN OPTIONAL }
//
//
package ldap
import (
"log"
ber "gopkg.in/asn1-ber.v1"
)
// ModifyDNRequest holds the request to modify a DN
type ModifyDNRequest struct {
DN string
NewRDN string
DeleteOldRDN bool
NewSuperior string
}
// NewModifyDNRequest creates a new request which can be passed to ModifyDN().
//
// To move an object in the tree, set the "newSup" to the new parent entry DN. Use an
// empty string for just changing the object's RDN.
//
// For moving the object without renaming, the "rdn" must be the first
// RDN of the given DN.
//
// A call like
// mdnReq := NewModifyDNRequest("uid=someone,dc=example,dc=org", "uid=newname", true, "")
// will set up the request to just rename uid=someone,dc=example,dc=org to
// uid=newname,dc=example,dc=org.
func NewModifyDNRequest(dn string, rdn string, delOld bool, newSup string) *ModifyDNRequest {
return &ModifyDNRequest{
DN: dn,
NewRDN: rdn,
DeleteOldRDN: delOld,
NewSuperior: newSup,
}
}
func (req *ModifyDNRequest) appendTo(envelope *ber.Packet) error {
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyDNRequest, nil, "Modify DN Request")
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN"))
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.NewRDN, "New RDN"))
pkt.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, req.DeleteOldRDN, "Delete old RDN"))
if req.NewSuperior != "" {
pkt.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, req.NewSuperior, "New Superior"))
}
envelope.AppendChild(pkt)
return nil
}
// ModifyDN renames the given DN and optionally moves it to another base (when the "newSup" argument
// to NewModifyDNRequest() is not "").
func (l *Conn) ModifyDN(m *ModifyDNRequest) error {
msgCtx, err := l.doRequest(m)
if err != nil {
return err
}
defer l.finishMessage(msgCtx)
packet, err := l.readPacket(msgCtx)
if err != nil {
return err
}
if packet.Children[1].Tag == ApplicationModifyDNResponse {
err := GetLDAPError(packet)
if err != nil {
return err
}
} else {
log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
}
return nil
}
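A minimal usage sketch of the ModifyDN API above (not part of the vendored file; the server address and DNs are placeholders, and a bind with sufficient rights is assumed to precede the call):

```go
package main

import (
	"log"

	ldap "gopkg.in/ldap.v3"
)

func main() {
	conn, err := ldap.Dial("tcp", "ldap.example.org:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// A Bind with sufficient rights would normally precede the rename.

	// Rename uid=someone under the same parent, dropping the old RDN.
	req := ldap.NewModifyDNRequest("uid=someone,dc=example,dc=org", "uid=newname", true, "")
	if err := conn.ModifyDN(req); err != nil {
		log.Fatal(err)
	}
}
```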

151
vendor/gopkg.in/ldap.v3/modify.go generated vendored Normal file
View File

@ -0,0 +1,151 @@
// File contains Modify functionality
//
// https://tools.ietf.org/html/rfc4511
//
// ModifyRequest ::= [APPLICATION 6] SEQUENCE {
// object LDAPDN,
// changes SEQUENCE OF change SEQUENCE {
// operation ENUMERATED {
// add (0),
// delete (1),
// replace (2),
// ... },
// modification PartialAttribute } }
//
// PartialAttribute ::= SEQUENCE {
// type AttributeDescription,
// vals SET OF value AttributeValue }
//
// AttributeDescription ::= LDAPString
// -- Constrained to <attributedescription>
// -- [RFC4512]
//
// AttributeValue ::= OCTET STRING
//
package ldap
import (
"log"
ber "gopkg.in/asn1-ber.v1"
)
// Change operation choices
const (
AddAttribute = 0
DeleteAttribute = 1
ReplaceAttribute = 2
)
// PartialAttribute for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
type PartialAttribute struct {
// Type is the type of the partial attribute
Type string
// Vals are the values of the partial attribute
Vals []string
}
func (p *PartialAttribute) encode() *ber.Packet {
seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "PartialAttribute")
seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, p.Type, "Type"))
set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue")
for _, value := range p.Vals {
set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals"))
}
seq.AppendChild(set)
return seq
}
// Change for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
type Change struct {
// Operation is the type of change to be made
Operation uint
// Modification is the attribute to be modified
Modification PartialAttribute
}
func (c *Change) encode() *ber.Packet {
change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change")
change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(c.Operation), "Operation"))
change.AppendChild(c.Modification.encode())
return change
}
// ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
type ModifyRequest struct {
// DN is the distinguishedName of the directory entry to modify
DN string
// Changes contain the attributes to modify
Changes []Change
// Controls hold optional controls to send with the request
Controls []Control
}
// Add appends the given attribute to the list of changes to be made
func (req *ModifyRequest) Add(attrType string, attrVals []string) {
req.appendChange(AddAttribute, attrType, attrVals)
}
// Delete appends the given attribute to the list of changes to be made
func (req *ModifyRequest) Delete(attrType string, attrVals []string) {
req.appendChange(DeleteAttribute, attrType, attrVals)
}
// Replace appends the given attribute to the list of changes to be made
func (req *ModifyRequest) Replace(attrType string, attrVals []string) {
req.appendChange(ReplaceAttribute, attrType, attrVals)
}
func (req *ModifyRequest) appendChange(operation uint, attrType string, attrVals []string) {
req.Changes = append(req.Changes, Change{operation, PartialAttribute{Type: attrType, Vals: attrVals}})
}
func (req *ModifyRequest) appendTo(envelope *ber.Packet) error {
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyRequest, nil, "Modify Request")
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN"))
changes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Changes")
for _, change := range req.Changes {
changes.AppendChild(change.encode())
}
pkt.AppendChild(changes)
envelope.AppendChild(pkt)
if len(req.Controls) > 0 {
envelope.AppendChild(encodeControls(req.Controls))
}
return nil
}
// NewModifyRequest creates a modify request for the given DN
func NewModifyRequest(dn string, controls []Control) *ModifyRequest {
return &ModifyRequest{
DN: dn,
Controls: controls,
}
}
// Modify performs the ModifyRequest
func (l *Conn) Modify(modifyRequest *ModifyRequest) error {
msgCtx, err := l.doRequest(modifyRequest)
if err != nil {
return err
}
defer l.finishMessage(msgCtx)
packet, err := l.readPacket(msgCtx)
if err != nil {
return err
}
if packet.Children[1].Tag == ApplicationModifyResponse {
err := GetLDAPError(packet)
if err != nil {
return err
}
} else {
log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
}
return nil
}
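A minimal usage sketch of NewModifyRequest and the Add/Replace helpers above (not part of the vendored file; the server address, DN, and attribute values are placeholders):

```go
package main

import (
	"log"

	ldap "gopkg.in/ldap.v3"
)

func main() {
	conn, err := ldap.Dial("tcp", "ldap.example.org:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// A Bind with sufficient rights would normally precede the modification.

	req := ldap.NewModifyRequest("uid=someone,dc=example,dc=org", nil)
	req.Replace("mail", []string{"someone@example.org"})
	req.Add("description", []string{"updated via ldap.Modify"})
	if err := conn.Modify(req); err != nil {
		log.Fatal(err)
	}
}
```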

131
vendor/gopkg.in/ldap.v3/passwdmodify.go generated vendored Normal file
View File

@ -0,0 +1,131 @@
// This file contains the password modify extended operation as specified in rfc 3062
//
// https://tools.ietf.org/html/rfc3062
//
package ldap
import (
"fmt"
ber "gopkg.in/asn1-ber.v1"
)
const (
passwordModifyOID = "1.3.6.1.4.1.4203.1.11.1"
)
// PasswordModifyRequest implements the Password Modify Extended Operation as defined in https://www.ietf.org/rfc/rfc3062.txt
type PasswordModifyRequest struct {
// UserIdentity is an optional string representation of the user associated with the request.
// This string may or may not be an LDAPDN [RFC2253].
// If no UserIdentity field is present, the request acts upon the password of the user currently associated with the LDAP session
UserIdentity string
// OldPassword, if present, contains the user's current password
OldPassword string
// NewPassword, if present, contains the desired password for this user
NewPassword string
}
// PasswordModifyResult holds the server response to a PasswordModifyRequest
type PasswordModifyResult struct {
// GeneratedPassword holds a password generated by the server, if present
GeneratedPassword string
// Referral is the returned referral, if any
Referral string
}
func (req *PasswordModifyRequest) appendTo(envelope *ber.Packet) error {
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Password Modify Extended Operation")
pkt.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, passwordModifyOID, "Extended Request Name: Password Modify OID"))
extendedRequestValue := ber.Encode(ber.ClassContext, ber.TypePrimitive, 1, nil, "Extended Request Value: Password Modify Request")
passwordModifyRequestValue := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Password Modify Request")
if req.UserIdentity != "" {
passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, req.UserIdentity, "User Identity"))
}
if req.OldPassword != "" {
passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 1, req.OldPassword, "Old Password"))
}
if req.NewPassword != "" {
passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 2, req.NewPassword, "New Password"))
}
extendedRequestValue.AppendChild(passwordModifyRequestValue)
pkt.AppendChild(extendedRequestValue)
envelope.AppendChild(pkt)
return nil
}
// NewPasswordModifyRequest creates a new PasswordModifyRequest
//
// According to RFC 3062:
// userIdentity is a string representing the user associated with the request.
// This string may or may not be an LDAPDN (RFC 2253).
// If userIdentity is empty then the operation will act on the user associated
// with the session.
//
// oldPassword is the user's current password. It may be empty or required,
// depending on the session user's access rights (usually an administrator
// can change a user's password without knowing the current one) and the
// password policy (see the pwdSafeModify password policy attribute).
//
// newPassword is the user's desired password. If empty, the server can return
// an error or generate a new password that will be available in
// PasswordModifyResult.GeneratedPassword.
//
func NewPasswordModifyRequest(userIdentity string, oldPassword string, newPassword string) *PasswordModifyRequest {
return &PasswordModifyRequest{
UserIdentity: userIdentity,
OldPassword: oldPassword,
NewPassword: newPassword,
}
}
// PasswordModify performs the modification request
func (l *Conn) PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error) {
msgCtx, err := l.doRequest(passwordModifyRequest)
if err != nil {
return nil, err
}
defer l.finishMessage(msgCtx)
packet, err := l.readPacket(msgCtx)
if err != nil {
return nil, err
}
result := &PasswordModifyResult{}
if packet.Children[1].Tag == ApplicationExtendedResponse {
err := GetLDAPError(packet)
if err != nil {
if IsErrorWithCode(err, LDAPResultReferral) {
for _, child := range packet.Children[1].Children {
if child.Tag == 3 {
result.Referral = child.Children[0].Value.(string)
}
}
}
return result, err
}
} else {
return nil, NewError(ErrorUnexpectedResponse, fmt.Errorf("unexpected Response: %d", packet.Children[1].Tag))
}
extendedResponse := packet.Children[1]
for _, child := range extendedResponse.Children {
if child.Tag == 11 {
passwordModifyResponseValue := ber.DecodePacket(child.Data.Bytes())
if len(passwordModifyResponseValue.Children) == 1 {
if passwordModifyResponseValue.Children[0].Tag == 0 {
result.GeneratedPassword = ber.DecodeString(passwordModifyResponseValue.Children[0].Data.Bytes())
}
}
}
}
return result, nil
}
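A minimal usage sketch of the password modify extended operation above (not part of the vendored file; the server address, DNs, and passwords are placeholders, and a prior bind is assumed):

```go
package main

import (
	"log"

	ldap "gopkg.in/ldap.v3"
)

func main() {
	conn, err := ldap.Dial("tcp", "ldap.example.org:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// A Bind as the user (or as an administrator) would normally precede this.

	// Change the password of the currently bound user.
	req := ldap.NewPasswordModifyRequest("", "OldPassword1", "NewPassword1")
	if _, err := conn.PasswordModify(req); err != nil {
		log.Fatal(err)
	}

	// Leave the new password empty to let the server generate one.
	req = ldap.NewPasswordModifyRequest("uid=someone,dc=example,dc=org", "", "")
	res, err := conn.PasswordModify(req)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("generated password:", res.GeneratedPassword)
}
```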

66
vendor/gopkg.in/ldap.v3/request.go generated vendored Normal file
View File

@ -0,0 +1,66 @@
package ldap
import (
"errors"
ber "gopkg.in/asn1-ber.v1"
)
var (
errRespChanClosed = errors.New("ldap: response channel closed")
errCouldNotRetMsg = errors.New("ldap: could not retrieve message")
)
type request interface {
appendTo(*ber.Packet) error
}
type requestFunc func(*ber.Packet) error
func (f requestFunc) appendTo(p *ber.Packet) error {
return f(p)
}
func (l *Conn) doRequest(req request) (*messageContext, error) {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
if err := req.appendTo(packet); err != nil {
return nil, err
}
if l.Debug {
ber.PrintPacket(packet)
}
msgCtx, err := l.sendMessage(packet)
if err != nil {
return nil, err
}
l.Debug.Printf("%d: returning", msgCtx.id)
return msgCtx, nil
}
func (l *Conn) readPacket(msgCtx *messageContext) (*ber.Packet, error) {
l.Debug.Printf("%d: waiting for response", msgCtx.id)
packetResponse, ok := <-msgCtx.responses
if !ok {
return nil, NewError(ErrorNetwork, errRespChanClosed)
}
packet, err := packetResponse.ReadPacket()
l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
if err != nil {
return nil, err
}
if packet == nil {
return nil, NewError(ErrorNetwork, errCouldNotRetMsg)
}
if l.Debug {
if err = addLDAPDescriptions(packet); err != nil {
return nil, err
}
ber.PrintPacket(packet)
}
return packet, nil
}

425
vendor/gopkg.in/ldap.v3/search.go generated vendored Normal file
View File

@ -0,0 +1,425 @@
// File contains Search functionality
//
// https://tools.ietf.org/html/rfc4511
//
// SearchRequest ::= [APPLICATION 3] SEQUENCE {
// baseObject LDAPDN,
// scope ENUMERATED {
// baseObject (0),
// singleLevel (1),
// wholeSubtree (2),
// ... },
// derefAliases ENUMERATED {
// neverDerefAliases (0),
// derefInSearching (1),
// derefFindingBaseObj (2),
// derefAlways (3) },
// sizeLimit INTEGER (0 .. maxInt),
// timeLimit INTEGER (0 .. maxInt),
// typesOnly BOOLEAN,
// filter Filter,
// attributes AttributeSelection }
//
// AttributeSelection ::= SEQUENCE OF selector LDAPString
// -- The LDAPString is constrained to
// -- <attributeSelector> in Section 4.5.1.8
//
// Filter ::= CHOICE {
// and [0] SET SIZE (1..MAX) OF filter Filter,
// or [1] SET SIZE (1..MAX) OF filter Filter,
// not [2] Filter,
// equalityMatch [3] AttributeValueAssertion,
// substrings [4] SubstringFilter,
// greaterOrEqual [5] AttributeValueAssertion,
// lessOrEqual [6] AttributeValueAssertion,
// present [7] AttributeDescription,
// approxMatch [8] AttributeValueAssertion,
// extensibleMatch [9] MatchingRuleAssertion,
// ... }
//
// SubstringFilter ::= SEQUENCE {
// type AttributeDescription,
// substrings SEQUENCE SIZE (1..MAX) OF substring CHOICE {
// initial [0] AssertionValue, -- can occur at most once
// any [1] AssertionValue,
// final [2] AssertionValue } -- can occur at most once
// }
//
// MatchingRuleAssertion ::= SEQUENCE {
// matchingRule [1] MatchingRuleId OPTIONAL,
// type [2] AttributeDescription OPTIONAL,
// matchValue [3] AssertionValue,
// dnAttributes [4] BOOLEAN DEFAULT FALSE }
//
//
package ldap
import (
"errors"
"fmt"
"sort"
"strings"
ber "gopkg.in/asn1-ber.v1"
)
// scope choices
const (
ScopeBaseObject = 0
ScopeSingleLevel = 1
ScopeWholeSubtree = 2
)
// ScopeMap contains human readable descriptions of scope choices
var ScopeMap = map[int]string{
ScopeBaseObject: "Base Object",
ScopeSingleLevel: "Single Level",
ScopeWholeSubtree: "Whole Subtree",
}
// derefAliases
const (
NeverDerefAliases = 0
DerefInSearching = 1
DerefFindingBaseObj = 2
DerefAlways = 3
)
// DerefMap contains human readable descriptions of derefAliases choices
var DerefMap = map[int]string{
NeverDerefAliases: "NeverDerefAliases",
DerefInSearching: "DerefInSearching",
DerefFindingBaseObj: "DerefFindingBaseObj",
DerefAlways: "DerefAlways",
}
// NewEntry returns an Entry object with the specified distinguished name and attribute key-value pairs.
// The attribute map is accessed in alphabetical order of its keys so that, for the same input map of
// attributes, the output entry always contains the attributes in the same order.
func NewEntry(dn string, attributes map[string][]string) *Entry {
var attributeNames []string
for attributeName := range attributes {
attributeNames = append(attributeNames, attributeName)
}
sort.Strings(attributeNames)
var encodedAttributes []*EntryAttribute
for _, attributeName := range attributeNames {
encodedAttributes = append(encodedAttributes, NewEntryAttribute(attributeName, attributes[attributeName]))
}
return &Entry{
DN: dn,
Attributes: encodedAttributes,
}
}
// Entry represents a single search result entry
type Entry struct {
// DN is the distinguished name of the entry
DN string
// Attributes are the returned attributes for the entry
Attributes []*EntryAttribute
}
// GetAttributeValues returns the values for the named attribute, or an empty list
func (e *Entry) GetAttributeValues(attribute string) []string {
for _, attr := range e.Attributes {
if attr.Name == attribute {
return attr.Values
}
}
return []string{}
}
// GetRawAttributeValues returns the byte values for the named attribute, or an empty list
func (e *Entry) GetRawAttributeValues(attribute string) [][]byte {
for _, attr := range e.Attributes {
if attr.Name == attribute {
return attr.ByteValues
}
}
return [][]byte{}
}
// GetAttributeValue returns the first value for the named attribute, or ""
func (e *Entry) GetAttributeValue(attribute string) string {
values := e.GetAttributeValues(attribute)
if len(values) == 0 {
return ""
}
return values[0]
}
// GetRawAttributeValue returns the first value for the named attribute, or an empty slice
func (e *Entry) GetRawAttributeValue(attribute string) []byte {
values := e.GetRawAttributeValues(attribute)
if len(values) == 0 {
return []byte{}
}
return values[0]
}
// Print outputs a human-readable description
func (e *Entry) Print() {
fmt.Printf("DN: %s\n", e.DN)
for _, attr := range e.Attributes {
attr.Print()
}
}
// PrettyPrint outputs a human-readable description with indenting
func (e *Entry) PrettyPrint(indent int) {
fmt.Printf("%sDN: %s\n", strings.Repeat(" ", indent), e.DN)
for _, attr := range e.Attributes {
attr.PrettyPrint(indent + 2)
}
}
// NewEntryAttribute returns a new EntryAttribute with the desired key-value pair
func NewEntryAttribute(name string, values []string) *EntryAttribute {
var bytes [][]byte
for _, value := range values {
bytes = append(bytes, []byte(value))
}
return &EntryAttribute{
Name: name,
Values: values,
ByteValues: bytes,
}
}
// EntryAttribute holds a single attribute
type EntryAttribute struct {
// Name is the name of the attribute
Name string
// Values contain the string values of the attribute
Values []string
// ByteValues contain the raw values of the attribute
ByteValues [][]byte
}
// Print outputs a human-readable description
func (e *EntryAttribute) Print() {
fmt.Printf("%s: %s\n", e.Name, e.Values)
}
// PrettyPrint outputs a human-readable description with indenting
func (e *EntryAttribute) PrettyPrint(indent int) {
fmt.Printf("%s%s: %s\n", strings.Repeat(" ", indent), e.Name, e.Values)
}
// SearchResult holds the server's response to a search request
type SearchResult struct {
// Entries are the returned entries
Entries []*Entry
// Referrals are the returned referrals
Referrals []string
// Controls are the returned controls
Controls []Control
}
// Print outputs a human-readable description
func (s *SearchResult) Print() {
for _, entry := range s.Entries {
entry.Print()
}
}
// PrettyPrint outputs a human-readable description with indenting
func (s *SearchResult) PrettyPrint(indent int) {
for _, entry := range s.Entries {
entry.PrettyPrint(indent)
}
}
// SearchRequest represents a search request to send to the server
type SearchRequest struct {
BaseDN string
Scope int
DerefAliases int
SizeLimit int
TimeLimit int
TypesOnly bool
Filter string
Attributes []string
Controls []Control
}
func (req *SearchRequest) appendTo(envelope *ber.Packet) error {
pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationSearchRequest, nil, "Search Request")
pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.BaseDN, "Base DN"))
pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(req.Scope), "Scope"))
pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(req.DerefAliases), "Deref Aliases"))
pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(req.SizeLimit), "Size Limit"))
pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(req.TimeLimit), "Time Limit"))
pkt.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, req.TypesOnly, "Types Only"))
// compile and encode filter
filterPacket, err := CompileFilter(req.Filter)
if err != nil {
return err
}
pkt.AppendChild(filterPacket)
// encode attributes
attributesPacket := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes")
for _, attribute := range req.Attributes {
attributesPacket.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute"))
}
pkt.AppendChild(attributesPacket)
envelope.AppendChild(pkt)
if len(req.Controls) > 0 {
envelope.AppendChild(encodeControls(req.Controls))
}
return nil
}
// NewSearchRequest creates a new search request
func NewSearchRequest(
BaseDN string,
Scope, DerefAliases, SizeLimit, TimeLimit int,
TypesOnly bool,
Filter string,
Attributes []string,
Controls []Control,
) *SearchRequest {
return &SearchRequest{
BaseDN: BaseDN,
Scope: Scope,
DerefAliases: DerefAliases,
SizeLimit: SizeLimit,
TimeLimit: TimeLimit,
TypesOnly: TypesOnly,
Filter: Filter,
Attributes: Attributes,
Controls: Controls,
}
}
// SearchWithPaging accepts a search request and desired page size in order to execute LDAP queries to fulfill the
// search request. All paged LDAP query responses will be buffered and the final result will be returned atomically.
// The following four cases are possible given the arguments:
// - given SearchRequest missing a control of type ControlTypePaging: we will add one with the desired paging size
// - given SearchRequest contains a control of type ControlTypePaging that isn't actually a ControlPaging: fail without issuing any queries
// - given SearchRequest contains a control of type ControlTypePaging with pagingSize equal to the size requested: no change to the search request
// - given SearchRequest contains a control of type ControlTypePaging with pagingSize not equal to the size requested: fail without issuing any queries
// A requested pagingSize of 0 is interpreted as no limit by LDAP servers.
func (l *Conn) SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error) {
var pagingControl *ControlPaging
control := FindControl(searchRequest.Controls, ControlTypePaging)
if control == nil {
pagingControl = NewControlPaging(pagingSize)
searchRequest.Controls = append(searchRequest.Controls, pagingControl)
} else {
castControl, ok := control.(*ControlPaging)
if !ok {
return nil, fmt.Errorf("expected paging control to be of type *ControlPaging, got %v", control)
}
if castControl.PagingSize != pagingSize {
return nil, fmt.Errorf("paging size given in search request (%d) conflicts with size given in search call (%d)", castControl.PagingSize, pagingSize)
}
pagingControl = castControl
}
searchResult := new(SearchResult)
for {
result, err := l.Search(searchRequest)
l.Debug.Printf("Looking for Paging Control...")
if err != nil {
return searchResult, err
}
if result == nil {
return searchResult, NewError(ErrorNetwork, errors.New("ldap: packet not received"))
}
for _, entry := range result.Entries {
searchResult.Entries = append(searchResult.Entries, entry)
}
for _, referral := range result.Referrals {
searchResult.Referrals = append(searchResult.Referrals, referral)
}
for _, control := range result.Controls {
searchResult.Controls = append(searchResult.Controls, control)
}
l.Debug.Printf("Looking for Paging Control...")
pagingResult := FindControl(result.Controls, ControlTypePaging)
if pagingResult == nil {
pagingControl = nil
l.Debug.Printf("Could not find paging control. Breaking...")
break
}
cookie := pagingResult.(*ControlPaging).Cookie
if len(cookie) == 0 {
pagingControl = nil
l.Debug.Printf("Could not find cookie. Breaking...")
break
}
pagingControl.SetCookie(cookie)
}
if pagingControl != nil {
l.Debug.Printf("Abandoning Paging...")
pagingControl.PagingSize = 0
l.Search(searchRequest)
}
return searchResult, nil
}
// Search performs the given search request
func (l *Conn) Search(searchRequest *SearchRequest) (*SearchResult, error) {
msgCtx, err := l.doRequest(searchRequest)
if err != nil {
return nil, err
}
defer l.finishMessage(msgCtx)
result := &SearchResult{
Entries: make([]*Entry, 0),
Referrals: make([]string, 0),
Controls: make([]Control, 0)}
for {
packet, err := l.readPacket(msgCtx)
if err != nil {
return nil, err
}
switch packet.Children[1].Tag {
case 4:
entry := new(Entry)
entry.DN = packet.Children[1].Children[0].Value.(string)
for _, child := range packet.Children[1].Children[1].Children {
attr := new(EntryAttribute)
attr.Name = child.Children[0].Value.(string)
for _, value := range child.Children[1].Children {
attr.Values = append(attr.Values, value.Value.(string))
attr.ByteValues = append(attr.ByteValues, value.ByteValue)
}
entry.Attributes = append(entry.Attributes, attr)
}
result.Entries = append(result.Entries, entry)
case 5:
err := GetLDAPError(packet)
if err != nil {
return nil, err
}
if len(packet.Children) == 3 {
for _, child := range packet.Children[2].Children {
decodedChild, err := DecodeControl(child)
if err != nil {
return nil, fmt.Errorf("failed to decode child control: %s", err)
}
result.Controls = append(result.Controls, decodedChild)
}
}
return result, nil
case 19:
result.Referrals = append(result.Referrals, packet.Children[1].Children[0].Value.(string))
}
}
}
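A minimal sketch combining NewSearchRequest, EscapeFilter, and SearchWithPaging (not part of the vendored file; the server address, base DN, filter value, and attribute names are placeholders):

```go
package main

import (
	"log"

	ldap "gopkg.in/ldap.v3"
)

func main() {
	conn, err := ldap.Dial("tcp", "ldap.example.org:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	req := ldap.NewSearchRequest(
		"dc=example,dc=org",
		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
		"(cn="+ldap.EscapeFilter("Some User")+")",
		[]string{"dn", "mail"},
		nil,
	)

	// SearchWithPaging follows the RFC 2696 paging cookie until every page is read.
	result, err := conn.SearchWithPaging(req, 500)
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range result.Entries {
		log.Printf("%s: %s", entry.DN, entry.GetAttributeValue("mail"))
	}
}
```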

373
vendor/layeh.com/radius/LICENSE generated vendored Normal file
View File

@ -0,0 +1,373 @@
Mozilla Public License Version 2.0
==================================
1. Definitions
--------------
1.1. "Contributor"
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
1.5. "Incompatible With Secondary Licenses"
means
(a) that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
(b) that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
1.10. "Modifications"
means any of the following:
(a) any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
(b) any new file in Source Code Form that contains any Covered
Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants and Conditions
--------------------------------
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
(a) under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
(b) under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
(a) for any code that a Contributor has removed from Covered Software;
or
(b) for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
(c) under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
3. Responsibilities
-------------------
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
(a) such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
(b) You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
5. Termination
--------------
5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
************************************************************************
* *
* 6. Disclaimer of Warranty *
* ------------------------- *
* *
* Covered Software is provided under this License on an "as is" *
* basis, without warranty of any kind, either expressed, implied, or *
* statutory, including, without limitation, warranties that the *
* Covered Software is free of defects, merchantable, fit for a *
* particular purpose or non-infringing. The entire risk as to the *
* quality and performance of the Covered Software is with You. *
* Should any Covered Software prove defective in any respect, You *
* (not any Contributor) assume the cost of any necessary servicing, *
* repair, or correction. This disclaimer of warranty constitutes an *
* essential part of this License. No use of any Covered Software is *
* authorized under this License except under this disclaimer. *
* *
************************************************************************
************************************************************************
* *
* 7. Limitation of Liability *
* -------------------------- *
* *
* Under no circumstances and under no legal theory, whether tort *
* (including negligence), contract, or otherwise, shall any *
* Contributor, or anyone who distributes Covered Software as *
* permitted above, be liable to You for any direct, indirect, *
* special, incidental, or consequential damages of any character *
* including, without limitation, damages for lost profits, loss of *
* goodwill, work stoppage, computer failure or malfunction, or any *
* and all other commercial damages or losses, even if such party *
* shall have been informed of the possibility of such damages. This *
* limitation of liability shall not apply to liability for death or *
* personal injury resulting from such party's negligence to the *
* extent applicable law prohibits such limitation. Some *
* jurisdictions do not allow the exclusion or limitation of *
* incidental or consequential damages, so this exclusion and *
* limitation may not apply to You. *
* *
************************************************************************
8. Litigation
-------------
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
10. Versions of the License
---------------------------
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
-------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.

95
vendor/layeh.com/radius/README.md generated vendored Normal file
View File

@ -0,0 +1,95 @@
<img src="internal/radius.svg" width="250" align="right">
# radius
a Go (golang) [RADIUS](https://tools.ietf.org/html/rfc2865) client and server implementation
[![GoDoc](https://godoc.org/layeh.com/radius?status.svg)](https://godoc.org/layeh.com/radius)
[![CircleCI](https://circleci.com/gh/layeh/radius/tree/master.svg?style=shield)](https://circleci.com/gh/layeh/radius/tree/master)
## Installation
go get -u layeh.com/radius
## Client example
```go
package main
import (
"context"
"log"
"layeh.com/radius"
"layeh.com/radius/rfc2865"
)
func main() {
packet := radius.New(radius.CodeAccessRequest, []byte(`secret`))
rfc2865.UserName_SetString(packet, "tim")
rfc2865.UserPassword_SetString(packet, "12345")
response, err := radius.Exchange(context.Background(), packet, "localhost:1812")
if err != nil {
log.Fatal(err)
}
log.Println("Code:", response.Code)
}
```
## Server example
```go
package main
import (
"log"
"layeh.com/radius"
"layeh.com/radius/rfc2865"
)
func main() {
handler := func(w radius.ResponseWriter, r *radius.Request) {
username := rfc2865.UserName_GetString(r.Packet)
password := rfc2865.UserPassword_GetString(r.Packet)
var code radius.Code
if username == "tim" && password == "12345" {
code = radius.CodeAccessAccept
} else {
code = radius.CodeAccessReject
}
log.Printf("Writing %v to %v", code, r.RemoteAddr)
w.Write(r.Response(code))
}
server := radius.PacketServer{
Handler: radius.HandlerFunc(handler),
SecretSource: radius.StaticSecretSource([]byte(`secret`)),
}
log.Printf("Starting server on :1812")
if err := server.ListenAndServe(); err != nil {
log.Fatal(err)
}
}
```
## RADIUS Dictionaries
Included in this package is the command line program `radius-dict-gen`. It can be installed with:
go get -u layeh.com/radius/cmd/radius-dict-gen
Given a FreeRADIUS dictionary, the program will generate helper functions and types for reading and manipulating RADIUS attributes in a packet. It is recommended that generated code be used for any RADIUS dictionary you would like to consume.
Included in this repository are sub-packages of generated helpers for commonly used RADIUS attributes, including [`rfc2865`](https://godoc.org/layeh.com/radius/rfc2865) and [`rfc2866`](https://godoc.org/layeh.com/radius/rfc2866).
## License
MPL 2.0
## Author
Tim Cooper (<tim.cooper@layeh.com>)

469
vendor/layeh.com/radius/attribute.go generated vendored Normal file
View File

@ -0,0 +1,469 @@
package radius
import (
"bytes"
"crypto/md5"
"encoding/binary"
"errors"
"math"
"net"
"strconv"
"time"
)
// ErrNoAttribute is returned when an attribute was not found where one was
// expected.
var ErrNoAttribute = errors.New("radius: attribute not found")
// Attribute is a wire encoded RADIUS attribute.
type Attribute []byte
// Integer returns the given attribute as an integer. An error is returned if
// the attribute is not 4 bytes long.
func Integer(a Attribute) (uint32, error) {
if len(a) != 4 {
return 0, errors.New("invalid length")
}
return binary.BigEndian.Uint32(a), nil
}
// NewInteger creates a new Attribute from the given integer value.
func NewInteger(i uint32) Attribute {
v := make([]byte, 4)
binary.BigEndian.PutUint32(v, i)
return Attribute(v)
}
// String returns the given attribute as a string.
func String(a Attribute) string {
return string(a)
}
// NewString returns a new Attribute from the given string. An error is returned
// if the string length is greater than 253.
func NewString(s string) (Attribute, error) {
if len(s) > 253 {
return nil, errors.New("string too long")
}
return Attribute(s), nil
}
// Bytes returns the given Attribute as a byte slice.
func Bytes(a Attribute) []byte {
b := make([]byte, len(a))
copy(b, []byte(a))
return b
}
// NewBytes returns a new Attribute from the given byte slice. An error is
// returned if the slice is longer than 253.
func NewBytes(b []byte) (Attribute, error) {
if len(b) > 253 {
return nil, errors.New("value too long")
}
a := make(Attribute, len(b))
copy(a, Attribute(b))
return a, nil
}
// IPAddr returns the given Attribute as an IPv4 IP address. An error is
// returned if the attribute is not 4 bytes long.
func IPAddr(a Attribute) (net.IP, error) {
if len(a) != net.IPv4len {
return nil, errors.New("invalid length")
}
b := make([]byte, net.IPv4len)
copy(b, []byte(a))
return b, nil
}
// NewIPAddr returns a new Attribute from the given IP address. An error is
// returned if the given address is not an IPv4 address.
func NewIPAddr(a net.IP) (Attribute, error) {
a = a.To4()
if a == nil {
return nil, errors.New("invalid IPv4 address")
}
b := make(Attribute, len(a))
copy(b, Attribute(a))
return b, nil
}
// IPv6Addr returns the given Attribute as an IPv6 IP address. An error is
// returned if the attribute is not 16 bytes long.
func IPv6Addr(a Attribute) (net.IP, error) {
if len(a) != net.IPv6len {
return nil, errors.New("invalid length")
}
b := make([]byte, net.IPv6len)
copy(b, []byte(a))
return b, nil
}
// NewIPv6Addr returns a new Attribute from the given IP address. An error is
// returned if the given address is not an IPv6 address.
func NewIPv6Addr(a net.IP) (Attribute, error) {
a = a.To16()
if a == nil {
return nil, errors.New("invalid IPv6 address")
}
b := make(Attribute, len(a))
copy(b, Attribute(a))
return b, nil
}
// IFID returns the given attribute as an 8-byte hardware address. An error is
// returned if the attribute is not 8 bytes long.
func IFID(a Attribute) (net.HardwareAddr, error) {
if len(a) != 8 {
return nil, errors.New("invalid length")
}
ifid := make(net.HardwareAddr, len(a))
copy(ifid, a)
return ifid, nil
}
// NewIFID returns a new Attribute from the given hardware address. An error
// is returned if the address is not 8 bytes long.
func NewIFID(addr net.HardwareAddr) (Attribute, error) {
if len(addr) != 8 {
return nil, errors.New("invalid length")
}
attr := make(Attribute, len(addr))
copy(attr, addr)
return attr, nil
}
// UserPassword decrypts the given "User-Password"-encrypted (as defined in RFC
// 2865) Attribute, and returns the plaintext. An error is returned if the
// attribute length is invalid, the secret is empty, or the requestAuthenticator
// length is invalid.
func UserPassword(a Attribute, secret, requestAuthenticator []byte) ([]byte, error) {
if len(a) < 16 || len(a) > 128 {
return nil, errors.New("invalid attribute length (" + strconv.Itoa(len(a)) + ")")
}
if len(secret) == 0 {
return nil, errors.New("empty secret")
}
if len(requestAuthenticator) != 16 {
return nil, errors.New("invalid requestAuthenticator length (" + strconv.Itoa(len(requestAuthenticator)) + ")")
}
dec := make([]byte, 0, len(a))
hash := md5.New()
hash.Write(secret)
hash.Write(requestAuthenticator)
dec = hash.Sum(dec)
for i, b := range a[:16] {
dec[i] ^= b
}
for i := 16; i < len(a); i += 16 {
hash.Reset()
hash.Write(secret)
hash.Write(a[i-16 : i])
dec = hash.Sum(dec)
for j, b := range a[i : i+16] {
dec[i+j] ^= b
}
}
if i := bytes.IndexByte(dec, 0); i > -1 {
return dec[:i], nil
}
return dec, nil
}
// NewUserPassword returns a new "User-Password"-encrypted attribute from the
// given plaintext, secret, and requestAuthenticator. An error is returned if
// the plaintext is too long, the secret is empty, or the requestAuthenticator
// is an invalid length.
func NewUserPassword(plaintext, secret, requestAuthenticator []byte) (Attribute, error) {
if len(plaintext) > 128 {
return nil, errors.New("plaintext longer than 128 characters")
}
if len(secret) == 0 {
return nil, errors.New("empty secret")
}
if len(requestAuthenticator) != 16 {
return nil, errors.New("requestAuthenticator not 16-bytes")
}
chunks := (len(plaintext) + 16 - 1) / 16
if chunks == 0 {
chunks = 1
}
enc := make([]byte, 0, chunks*16)
hash := md5.New()
hash.Write(secret)
hash.Write(requestAuthenticator)
enc = hash.Sum(enc)
for i, b := range plaintext[:16] {
enc[i] ^= b
}
for i := 16; i < len(plaintext); i += 16 {
hash.Reset()
hash.Write(secret)
hash.Write(enc[i-16 : i])
enc = hash.Sum(enc)
for j, b := range plaintext[i : i+16] {
enc[i+j] ^= b
}
}
return enc, nil
}
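For reference, a hedged round-trip sketch of NewUserPassword/UserPassword (not part of the vendored file; the shared secret and the 16-byte Request Authenticator are placeholders, and a 16-byte plaintext is used):

```go
package main

import (
	"fmt"
	"log"

	"layeh.com/radius"
)

func main() {
	secret := []byte(`shared-secret`)
	// In a real exchange this is the 16-byte Request Authenticator taken from the
	// packet header; a fixed placeholder is used here.
	authenticator := []byte("0123456789abcdef")

	enc, err := radius.NewUserPassword([]byte("Password12345678"), secret, authenticator)
	if err != nil {
		log.Fatal(err)
	}

	plain, err := radius.UserPassword(enc, secret, authenticator)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", plain) // Password12345678
}
```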
// Date returns the given Attribute as time.Time. An error is returned if the
// attribute is not 4 bytes long.
func Date(a Attribute) (time.Time, error) {
if len(a) != 4 {
return time.Time{}, errors.New("invalid length")
}
sec := binary.BigEndian.Uint32([]byte(a))
return time.Unix(int64(sec), 0), nil
}
// NewDate returns a new Attribute from the given time.Time.
func NewDate(t time.Time) (Attribute, error) {
unix := t.Unix()
if unix > math.MaxUint32 {
return nil, errors.New("time out of range")
}
a := make([]byte, 4)
binary.BigEndian.PutUint32(a, uint32(t.Unix()))
return a, nil
}
// VendorSpecific returns the vendor ID and value from the given attribute. An
// error is returned if the attribute is less than 5 bytes long.
func VendorSpecific(a Attribute) (vendorID uint32, value Attribute, err error) {
if len(a) < 5 {
err = errors.New("invalid length")
return
}
vendorID = binary.BigEndian.Uint32(a[:4])
value = make([]byte, len(a)-4)
copy(value, a[4:])
return
}
// NewVendorSpecific returns a new vendor specific attribute with the given
// vendor ID and value.
func NewVendorSpecific(vendorID uint32, value Attribute) (Attribute, error) {
if len(value) > 249 {
return nil, errors.New("value too long")
}
a := make([]byte, 4+len(value))
binary.BigEndian.PutUint32(a, vendorID)
copy(a[4:], value)
return a, nil
}
// Integer64 returns the given attribute as an integer. An error is returned if
// the attribute is not 8 bytes long.
func Integer64(a Attribute) (uint64, error) {
if len(a) != 8 {
return 0, errors.New("invalid length")
}
return binary.BigEndian.Uint64(a), nil
}
// NewInteger64 creates a new Attribute from the given integer value.
func NewInteger64(i uint64) Attribute {
v := make([]byte, 8)
binary.BigEndian.PutUint64(v, i)
return Attribute(v)
}
// TLV returns the components of a Type-Length-Value (TLV) attribute.
func TLV(a Attribute) (tlvType byte, tlvValue Attribute, err error) {
if len(a) < 3 || len(a) > 255 || int(a[1]) != len(a) {
err = errors.New("invalid length")
return
}
tlvType = a[0]
tlvValue = make(Attribute, len(a)-2)
copy(tlvValue, a[2:])
return
}
// NewTLV returns a new TLV attribute.
func NewTLV(tlvType byte, tlvValue Attribute) (Attribute, error) {
if len(tlvValue) < 1 || len(tlvValue) > 253 {
return nil, errors.New("invalid value length")
}
a := make(Attribute, 1+1+len(tlvValue))
a[0] = tlvType
a[1] = byte(1 + 1 + len(tlvValue))
copy(a[2:], tlvValue)
return a, nil
}
// NewTunnelPassword returns an RFC 2868 encrypted Tunnel-Password.
// A tag must be added to the returned Attribute.
func NewTunnelPassword(password, salt, secret, requestAuthenticator []byte) (Attribute, error) {
if len(password) > 249 {
return nil, errors.New("invalid password length")
}
if len(salt) != 2 {
return nil, errors.New("invalid salt length")
}
if salt[0]&0x80 != 0x80 { // MSB must be 1
return nil, errors.New("invalid salt")
}
if len(secret) == 0 {
return nil, errors.New("empty secret")
}
if len(requestAuthenticator) != 16 {
return nil, errors.New("invalid requestAuthenticator length")
}
chunks := (1 + len(password) + 16 - 1) / 16
if chunks == 0 {
chunks = 1
}
attr := make([]byte, 2+chunks*16)
copy(attr[:2], salt)
attr[2] = byte(len(password))
copy(attr[3:], password)
hash := md5.New()
var b [md5.Size]byte
for chunk := 0; chunk < chunks; chunk++ {
hash.Reset()
hash.Write(secret)
if chunk == 0 {
hash.Write(requestAuthenticator)
hash.Write(salt)
} else {
hash.Write(attr[2+(chunk-1)*16 : 2+chunk*16])
}
hash.Sum(b[:0])
for i := 0; i < 16; i++ {
attr[2+chunk*16+i] ^= b[i]
}
}
return attr, nil
}
// TunnelPassword decrypts an RFC 2868 encrypted Tunnel-Password.
// The Attribute must not be prefixed with a tag.
func TunnelPassword(a Attribute, secret, requestAuthenticator []byte) (password, salt []byte, err error) {
if len(a) > 252 || len(a) < 18 || (len(a)-2)%16 != 0 {
err = errors.New("invalid length")
return
}
if len(secret) == 0 {
err = errors.New("empty secret")
return
}
if len(requestAuthenticator) != 16 {
err = errors.New("invalid requestAuthenticator length")
return
}
if a[0]&0x80 != 0x80 { // salt MSB must be 1
err = errors.New("invalid salt")
return
}
chunks := (len(a) - 2) / 16
plaintext := make([]byte, chunks*16)
hash := md5.New()
var b [md5.Size]byte
for chunk := 0; chunk < chunks; chunk++ {
hash.Reset()
hash.Write(secret)
if chunk == 0 {
hash.Write(requestAuthenticator)
hash.Write(a[:2]) // salt
} else {
hash.Write(a[2+(chunk-1)*16 : 2+chunk*16])
}
hash.Sum(b[:0])
for i := 0; i < 16; i++ {
plaintext[chunk*16+i] = a[2+chunk*16+i] ^ b[i]
}
}
passwordLength := plaintext[0]
if int(passwordLength) > (len(plaintext) - 1) {
err = errors.New("invalid password length")
return
}
password = plaintext[1 : 1+passwordLength]
salt = append([]byte(nil), a[:2]...)
return
}
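A round-trip sketch of the Tunnel-Password helpers under invented inputs; per the checks above, the salt is two bytes with the most significant bit of the first byte set, and the same secret and request authenticator are used for both directions:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"layeh.com/radius"
)

func main() {
	secret := []byte("shared-secret") // placeholder secret
	var authenticator [16]byte
	if _, err := rand.Read(authenticator[:]); err != nil {
		panic(err)
	}
	salt := []byte{0x80, 0x2a} // MSB of the first byte must be 1

	enc, err := radius.NewTunnelPassword([]byte("tunnel-pass"), salt, secret, authenticator[:])
	if err != nil {
		panic(err)
	}
	password, _, err := radius.TunnelPassword(enc, secret, authenticator[:])
	if err != nil {
		panic(err)
	}
	fmt.Println(string(password)) // tunnel-pass
}
```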
// NewIPv6Prefix returns a new Attribute encoding the given IPv6 prefix as a
// reserved byte, a prefix-length byte, and the significant octets of the
// prefix. An error is returned if prefix is nil or not an IPv6 network.
func NewIPv6Prefix(prefix *net.IPNet) (Attribute, error) {
if prefix == nil {
return nil, errors.New("nil prefix")
}
if len(prefix.IP) != net.IPv6len {
return nil, errors.New("IP is not IPv6")
}
ones, bits := prefix.Mask.Size()
if bits != net.IPv6len*8 {
return nil, errors.New("mask is not IPv6")
}
attr := make(Attribute, 2+((ones+7)/8))
// attr[0] = 0x00
attr[1] = byte(ones)
copy(attr[2:], prefix.IP)
// clear final non-mask bits
if i := uint(ones % 8); i != 0 {
for ; i < 8; i++ {
attr[len(attr)-1] &^= 1 << (7 - i)
}
}
return attr, nil
}
// IPv6Prefix returns the IPv6 prefix encoded in the given Attribute. An error
// is returned if the attribute is malformed.
func IPv6Prefix(a Attribute) (*net.IPNet, error) {
if len(a) < 2 || len(a) > 18 {
return nil, errors.New("invalid length")
}
prefixLength := int(a[1])
if (len(a)-2)*8 < prefixLength {
return nil, errors.New("invalid prefix length")
}
ip := make(net.IP, net.IPv6len)
copy(ip, a[2:])
// clear final non-mask bits
if i := uint(prefixLength % 8); i != 0 {
for ; i < 8; i++ {
ip[prefixLength/8] &^= 1 << (7 - i)
}
}
return &net.IPNet{
IP: ip,
Mask: net.CIDRMask(prefixLength, net.IPv6len*8),
}, nil
}
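A quick sketch of encoding and decoding an IPv6 prefix with these two helpers:

```go
package main

import (
	"fmt"
	"net"

	"layeh.com/radius"
)

func main() {
	_, prefix, err := net.ParseCIDR("2001:db8::/32")
	if err != nil {
		panic(err)
	}
	attr, err := radius.NewIPv6Prefix(prefix)
	if err != nil {
		panic(err)
	}
	decoded, err := radius.IPv6Prefix(attr)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded) // 2001:db8::/32
}
```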

115
vendor/layeh.com/radius/attributes.go generated vendored Normal file
View File

@ -0,0 +1,115 @@
package radius
import (
"errors"
"sort"
)
// Type is the RADIUS attribute type.
type Type int
// TypeInvalid is a Type that can be used to represent an invalid RADIUS
// attribute type.
const TypeInvalid Type = -1
// Attributes is a map of RADIUS attribute types to slices of Attributes.
type Attributes map[Type][]Attribute
// ParseAttributes parses the wire-encoded RADIUS attributes and returns a new
// Attributes value. An error is returned if the buffer is malformed.
func ParseAttributes(b []byte) (Attributes, error) {
attrs := make(map[Type][]Attribute)
for len(b) > 0 {
if len(b) < 2 {
return nil, errors.New("short buffer")
}
length := int(b[1])
if length > len(b) || length < 2 || length > 255 {
return nil, errors.New("invalid attribute length")
}
typ := Type(b[0])
var value Attribute
if length > 2 {
value = make(Attribute, length-2)
copy(value, b[2:])
}
attrs[typ] = append(attrs[typ], value)
b = b[length:]
}
return attrs, nil
}
// Add appends the given Attribute to the map entry of the given type.
func (a Attributes) Add(key Type, value Attribute) {
a[key] = append(a[key], value)
}
// Del removes all Attributes of the given type from a.
func (a Attributes) Del(key Type) {
delete(a, key)
}
// Get returns the first Attribute of Type key. nil is returned if no Attribute
// of Type key exists in a.
func (a Attributes) Get(key Type) Attribute {
attr, _ := a.Lookup(key)
return attr
}
// Lookup returns the first Attribute of Type key. nil and false are returned if
// no Attribute of Type key exists in a.
func (a Attributes) Lookup(key Type) (Attribute, bool) {
m := a[key]
if len(m) == 0 {
return nil, false
}
return m[0], true
}
// Set removes all Attributes of Type key and appends value.
func (a Attributes) Set(key Type, value Attribute) {
a[key] = append(a[key][:0], value)
}
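To show how the map-based API fits together, here is a small sketch that uses a raw attribute type number (1, User-Name in RFC 2865) rather than the generated constants:

```go
package main

import (
	"fmt"

	"layeh.com/radius"
)

func main() {
	attrs := make(radius.Attributes)
	const userName radius.Type = 1 // User-Name (RFC 2865)

	attrs.Set(userName, radius.Attribute("alice")) // replaces any existing values
	attrs.Add(userName, radius.Attribute("bob"))   // appends a second value

	if value, ok := attrs.Lookup(userName); ok {
		fmt.Println(string(value)) // alice (Get/Lookup return the first value)
	}

	attrs.Del(userName)
	fmt.Println(attrs.Get(userName) == nil) // true
}
```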
func (a Attributes) encodeTo(b []byte) {
types := make([]int, 0, len(a))
for typ := range a {
if typ >= 1 && typ <= 255 {
types = append(types, int(typ))
}
}
sort.Ints(types)
for _, typ := range types {
for _, attr := range a[Type(typ)] {
if len(attr) > 255 {
continue
}
size := 1 + 1 + len(attr)
b[0] = byte(typ)
b[1] = byte(size)
copy(b[2:], attr)
b = b[size:]
}
}
}
func (a Attributes) wireSize() (bytes int) {
for typ, attrs := range a {
if typ < 1 || typ > 255 {
continue
}
for _, attr := range attrs {
if len(attr) > 255 {
return -1
}
// type field + length field + value field
bytes += 1 + 1 + len(attr)
}
}
return
}

130
vendor/layeh.com/radius/client.go generated vendored Normal file
View File

@ -0,0 +1,130 @@
package radius
import (
"context"
"net"
"time"
)
// Client is a RADIUS client that can exchange packets with a RADIUS server.
type Client struct {
// Network on which to make the connection. Defaults to "udp".
Net string
// Dialer to use when making the outgoing connections.
Dialer net.Dialer
// Interval on which to resend packet (zero or negative value means no
// retry).
Retry time.Duration
// MaxPacketErrors controls how many packet parsing and validation errors
// the client will ignore before returning the error from Exchange.
//
// If zero, Exchange will drop all packet parsing errors.
MaxPacketErrors int
// InsecureSkipVerify controls whether the client should skip verifying
// response packets received.
InsecureSkipVerify bool
}
// DefaultClient is the RADIUS client used by the Exchange function.
var DefaultClient = &Client{
Retry: time.Second,
MaxPacketErrors: 10,
}
// Exchange uses DefaultClient to send the given RADIUS packet to the server at
// address addr and waits for a response.
func Exchange(ctx context.Context, packet *Packet, addr string) (*Packet, error) {
return DefaultClient.Exchange(ctx, packet, addr)
}
// Exchange sends the packet to the given server and waits for a response. ctx
// must be non-nil.
func (c *Client) Exchange(ctx context.Context, packet *Packet, addr string) (*Packet, error) {
if ctx == nil {
panic("nil context")
}
wire, err := packet.Encode()
if err != nil {
return nil, err
}
connNet := c.Net
if connNet == "" {
connNet = "udp"
}
conn, err := c.Dialer.DialContext(ctx, connNet, addr)
if err != nil {
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
return nil, err
}
defer conn.Close()
conn.Write(wire)
var cancel context.CancelFunc
ctx, cancel = context.WithCancel(ctx)
defer cancel()
var retryTimer <-chan time.Time
if c.Retry > 0 {
retry := time.NewTicker(c.Retry)
defer retry.Stop()
retryTimer = retry.C
}
go func() {
defer conn.Close()
for {
select {
case <-retryTimer:
conn.Write(wire)
case <-ctx.Done():
return
}
}
}()
var packetErrorCount int
var incoming [MaxPacketLength]byte
for {
n, err := conn.Read(incoming[:])
if err != nil {
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
return nil, err
}
received, err := Parse(incoming[:n], packet.Secret)
if err != nil {
packetErrorCount++
if c.MaxPacketErrors > 0 && packetErrorCount >= c.MaxPacketErrors {
return nil, err
}
continue
}
if !c.InsecureSkipVerify && !IsAuthenticResponse(incoming[:n], wire, packet.Secret) {
packetErrorCount++
if c.MaxPacketErrors > 0 && packetErrorCount >= c.MaxPacketErrors {
return nil, &NonAuthenticResponseError{}
}
continue
}
return received, nil
}
}
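For context, a client-side sketch in the spirit of the package documentation: it builds an Access-Request, sets User-Name and User-Password through the generated rfc2865 helpers (the generated.go file is collapsed later in this diff, so the helper names are assumed from that dictionary), and exchanges the packet with a server. The address and secret are placeholders.

```go
package main

import (
	"context"
	"log"

	"layeh.com/radius"
	"layeh.com/radius/rfc2865"
)

func main() {
	packet := radius.New(radius.CodeAccessRequest, []byte("placeholder-secret"))
	rfc2865.UserName_SetString(packet, "alice")
	rfc2865.UserPassword_SetString(packet, "password123")

	response, err := radius.Exchange(context.Background(), packet, "radius.example.com:1812")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("Code:", response.Code)
}
```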

64
vendor/layeh.com/radius/code.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
package radius
import (
"strconv"
)
// Code defines the RADIUS packet type.
type Code int
// Standard RADIUS packet codes.
const (
CodeAccessRequest Code = 1
CodeAccessAccept Code = 2
CodeAccessReject Code = 3
CodeAccountingRequest Code = 4
CodeAccountingResponse Code = 5
CodeAccessChallenge Code = 11
CodeStatusServer Code = 12
CodeStatusClient Code = 13
CodeDisconnectRequest Code = 40
CodeDisconnectACK Code = 41
CodeDisconnectNAK Code = 42
CodeCoARequest Code = 43
CodeCoAACK Code = 44
CodeCoANAK Code = 45
CodeReserved Code = 255
)
// String returns a string representation of the code.
func (c Code) String() string {
switch c {
case CodeAccessRequest:
return `Access-Request`
case CodeAccessAccept:
return `Access-Accept`
case CodeAccessReject:
return `Access-Reject`
case CodeAccountingRequest:
return `Accounting-Request`
case CodeAccountingResponse:
return `Accounting-Response`
case CodeAccessChallenge:
return `Access-Challenge`
case CodeStatusServer:
return `Status-Server`
case CodeStatusClient:
return `Status-Client`
case CodeDisconnectRequest:
return `Disconnect-Request`
case CodeDisconnectACK:
return `Disconnect-ACK`
case CodeDisconnectNAK:
return `Disconnect-NAK`
case CodeCoARequest:
return `CoA-Request`
case CodeCoAACK:
return `CoA-ACK`
case CodeCoANAK:
return `CoA-NAK`
case CodeReserved:
return `Reserved`
}
return "Code(" + strconv.Itoa(int(c)) + ")"
}

2
vendor/layeh.com/radius/doc.go generated vendored Normal file
View File

@ -0,0 +1,2 @@
// Package radius provides a RADIUS client and server (RFC 2865, RFC 2866).
package radius // import "layeh.com/radius"

10
vendor/layeh.com/radius/errors.go generated vendored Normal file
View File

@ -0,0 +1,10 @@
package radius
// NonAuthenticResponseError is returned when a client was expecting
// a valid response but did not receive one.
type NonAuthenticResponseError struct {
}
func (e *NonAuthenticResponseError) Error() string {
return `radius: non-authentic response`
}

1
vendor/layeh.com/radius/go.mod generated vendored Normal file
View File

@ -0,0 +1 @@
module layeh.com/radius

164
vendor/layeh.com/radius/packet.go generated vendored Normal file
View File

@ -0,0 +1,164 @@
package radius
import (
"bytes"
"crypto/md5"
"crypto/rand"
"encoding/binary"
"errors"
)
// MaxPacketLength is the maximum wire length of a RADIUS packet.
const MaxPacketLength = 4095
// Packet is a RADIUS packet.
type Packet struct {
Code Code
Identifier byte
Authenticator [16]byte
Secret []byte
Attributes
}
// New creates a new packet with the Code and Secret fields set to the given
// values. The returned packet's Identifier and Authenticator fields are filled
// with random values.
//
// The function panics if not enough random data could be generated.
func New(code Code, secret []byte) *Packet {
var buff [17]byte
if _, err := rand.Read(buff[:]); err != nil {
panic(err)
}
packet := &Packet{
Code: code,
Identifier: buff[0],
Secret: secret,
Attributes: make(Attributes),
}
copy(packet.Authenticator[:], buff[1:])
return packet
}
// Parse parses an encoded RADIUS packet b. An error is returned if the packet
// is malformed.
func Parse(b, secret []byte) (*Packet, error) {
if len(b) < 20 {
return nil, errors.New("radius: packet not at least 20 bytes long")
}
length := int(binary.BigEndian.Uint16(b[2:4]))
if length < 20 || length > MaxPacketLength || len(b) != length {
return nil, errors.New("radius: invalid packet length")
}
attrs, err := ParseAttributes(b[20:])
if err != nil {
return nil, err
}
packet := &Packet{
Code: Code(b[0]),
Identifier: b[1],
Secret: secret,
Attributes: attrs,
}
copy(packet.Authenticator[:], b[4:20])
return packet, nil
}
// Response returns a new packet that has the same identifier, secret, and
// authenticator as the current packet.
func (p *Packet) Response(code Code) *Packet {
q := &Packet{
Code: code,
Identifier: p.Identifier,
Secret: p.Secret,
Attributes: make(Attributes),
}
copy(q.Authenticator[:], p.Authenticator[:])
return q
}
// Encode encodes the RADIUS packet to wire format. An error is returned if the
// encoded packet is too long (due to its Attributes), or if the packet has an
// unknown Code.
func (p *Packet) Encode() ([]byte, error) {
attributesSize := p.Attributes.wireSize()
if attributesSize == -1 {
return nil, errors.New("invalid packet attribute length")
}
size := 20 + attributesSize
if size > MaxPacketLength {
return nil, errors.New("encoded packet is too long")
}
b := make([]byte, size)
b[0] = byte(p.Code)
b[1] = byte(p.Identifier)
binary.BigEndian.PutUint16(b[2:4], uint16(size))
p.Attributes.encodeTo(b[20:])
switch p.Code {
case CodeAccessRequest, CodeStatusServer:
copy(b[4:20], p.Authenticator[:])
case CodeAccessAccept, CodeAccessReject, CodeAccountingRequest, CodeAccountingResponse, CodeAccessChallenge, CodeDisconnectRequest, CodeDisconnectACK, CodeDisconnectNAK, CodeCoARequest, CodeCoAACK, CodeCoANAK:
hash := md5.New()
hash.Write(b[:4])
switch p.Code {
case CodeAccountingRequest, CodeDisconnectRequest, CodeCoARequest:
var nul [16]byte
hash.Write(nul[:])
default:
hash.Write(p.Authenticator[:])
}
hash.Write(b[20:])
hash.Write(p.Secret)
hash.Sum(b[4:4:20])
default:
return nil, errors.New("radius: unknown Packet Code")
}
return b, nil
}
// IsAuthenticResponse returns whether the given RADIUS response is an authentic
// response to the given request.
func IsAuthenticResponse(response, request, secret []byte) bool {
if len(response) < 20 || len(request) < 20 || len(secret) == 0 {
return false
}
hash := md5.New()
hash.Write(response[:4])
hash.Write(request[4:20])
hash.Write(response[20:])
hash.Write(secret)
var sum [md5.Size]byte
return bytes.Equal(hash.Sum(sum[:0]), response[4:20])
}
// IsAuthenticRequest returns whether the given RADIUS request is an authentic
// request using the given secret.
func IsAuthenticRequest(request, secret []byte) bool {
if len(request) < 20 || len(secret) == 0 {
return false
}
switch Code(request[0]) {
case CodeAccessRequest, CodeStatusServer:
return true
case CodeAccountingRequest, CodeDisconnectRequest, CodeCoARequest:
hash := md5.New()
hash.Write(request[:4])
var nul [16]byte
hash.Write(nul[:])
hash.Write(request[20:])
hash.Write(secret)
var sum [md5.Size]byte
return bytes.Equal(hash.Sum(sum[:0]), request[4:20])
default:
return false
}
}
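A self-contained sketch of the encode/parse round trip, including the request-authenticity check defined above; the secret and attribute values are illustrative:

```go
package main

import (
	"fmt"

	"layeh.com/radius"
)

func main() {
	secret := []byte("shared-secret") // illustrative secret

	request := radius.New(radius.CodeAccessRequest, secret)
	request.Add(1, radius.Attribute("alice")) // 1 = User-Name (RFC 2865)

	wire, err := request.Encode()
	if err != nil {
		panic(err)
	}
	fmt.Println(radius.IsAuthenticRequest(wire, secret)) // true (Access-Request)

	parsed, err := radius.Parse(wire, secret)
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.Code, string(parsed.Get(1))) // Access-Request alice
}
```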

3
vendor/layeh.com/radius/rfc2865/generate.go generated vendored Normal file
View File

@ -0,0 +1,3 @@
//go:generate go run ../cmd/radius-dict-gen/main.go -package rfc2865 -output generated.go /usr/share/freeradius/dictionary.rfc2865
package rfc2865

3310
vendor/layeh.com/radius/rfc2865/generated.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

241
vendor/layeh.com/radius/server-packet.go generated vendored Normal file
View File

@ -0,0 +1,241 @@
package radius
import (
"context"
"errors"
"net"
"sync"
"sync/atomic"
)
type packetResponseWriter struct {
// listener that received the packet
conn net.PacketConn
addr net.Addr
}
func (r *packetResponseWriter) Write(packet *Packet) error {
encoded, err := packet.Encode()
if err != nil {
return err
}
if _, err := r.conn.WriteTo(encoded, r.addr); err != nil {
return err
}
return nil
}
// PacketServer listens for RADIUS requests on a packet-based protocol (e.g.
// UDP).
type PacketServer struct {
// The address on which the server listens. Defaults to :1812.
Addr string
// The network on which the server listens. Defaults to udp.
Network string
// The source from which the secret is obtained for parsing and validating
// the request.
SecretSource SecretSource
// Handler which is called to process the request.
Handler Handler
// Skip incoming packet authenticity validation.
// This should only be set to true for debugging purposes.
InsecureSkipVerify bool
shutdownRequested int32
mu sync.Mutex
ctx context.Context
ctxDone context.CancelFunc
listeners map[net.PacketConn]uint
lastActive chan struct{} // closed when the last active item finishes
activeCount int32
}
func (s *PacketServer) initLocked() {
if s.ctx == nil {
s.ctx, s.ctxDone = context.WithCancel(context.Background())
s.listeners = make(map[net.PacketConn]uint)
s.lastActive = make(chan struct{})
}
}
func (s *PacketServer) activeAdd() {
atomic.AddInt32(&s.activeCount, 1)
}
func (s *PacketServer) activeDone() {
if atomic.AddInt32(&s.activeCount, -1) == -1 {
close(s.lastActive)
}
}
// TODO: logger on PacketServer
// Serve accepts incoming connections on conn.
func (s *PacketServer) Serve(conn net.PacketConn) error {
if s.Handler == nil {
return errors.New("radius: nil Handler")
}
if s.SecretSource == nil {
return errors.New("radius: nil SecretSource")
}
s.mu.Lock()
s.initLocked()
if atomic.LoadInt32(&s.shutdownRequested) == 1 {
s.mu.Unlock()
return ErrServerShutdown
}
s.listeners[conn]++
s.mu.Unlock()
type requestKey struct {
IP string
Identifier byte
}
var (
requestsLock sync.Mutex
requests = map[requestKey]struct{}{}
)
s.activeAdd()
defer func() {
s.mu.Lock()
s.listeners[conn]--
if s.listeners[conn] == 0 {
delete(s.listeners, conn)
}
s.mu.Unlock()
s.activeDone()
}()
var buff [MaxPacketLength]byte
for {
n, remoteAddr, err := conn.ReadFrom(buff[:])
if err != nil {
if atomic.LoadInt32(&s.shutdownRequested) == 1 {
return ErrServerShutdown
}
if ne, ok := err.(net.Error); ok && !ne.Temporary() {
return err
}
continue
}
s.activeAdd()
go func(buff []byte, remoteAddr net.Addr) {
defer s.activeDone()
secret, err := s.SecretSource.RADIUSSecret(s.ctx, remoteAddr)
if err != nil {
return
}
if len(secret) == 0 {
return
}
if !s.InsecureSkipVerify && !IsAuthenticRequest(buff, secret) {
return
}
packet, err := Parse(buff, secret)
if err != nil {
return
}
key := requestKey{
IP: remoteAddr.String(),
Identifier: packet.Identifier,
}
requestsLock.Lock()
if _, ok := requests[key]; ok {
requestsLock.Unlock()
return
}
requests[key] = struct{}{}
requestsLock.Unlock()
response := packetResponseWriter{
conn: conn,
addr: remoteAddr,
}
defer func() {
requestsLock.Lock()
delete(requests, key)
requestsLock.Unlock()
}()
request := Request{
LocalAddr: conn.LocalAddr(),
RemoteAddr: remoteAddr,
Packet: packet,
ctx: s.ctx,
}
s.Handler.ServeRADIUS(&response, &request)
}(append([]byte(nil), buff[:n]...), remoteAddr)
}
}
// ListenAndServe starts a RADIUS server on the address given in s.
func (s *PacketServer) ListenAndServe() error {
if s.Handler == nil {
return errors.New("radius: nil Handler")
}
if s.SecretSource == nil {
return errors.New("radius: nil SecretSource")
}
addrStr := ":1812"
if s.Addr != "" {
addrStr = s.Addr
}
network := "udp"
if s.Network != "" {
network = s.Network
}
pc, err := net.ListenPacket(network, addrStr)
if err != nil {
return err
}
defer pc.Close()
return s.Serve(pc)
}
// Shutdown gracefully stops the server. It first closes all listeners and then
// waits for any running handlers to complete.
//
// Shutdown returns nil after all handlers have completed. ctx.Err() is
// returned if ctx is canceled.
//
// Any Serve methods return ErrServerShutdown after Shutdown is called.
func (s *PacketServer) Shutdown(ctx context.Context) error {
s.mu.Lock()
s.initLocked()
if atomic.CompareAndSwapInt32(&s.shutdownRequested, 0, 1) {
for listener := range s.listeners {
listener.Close()
}
s.ctxDone()
s.activeDone()
}
s.mu.Unlock()
select {
case <-s.lastActive:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
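A minimal server sketch wiring PacketServer to a handler that rejects every request; the listen address and secret are placeholders, and a real handler would inspect the request's attributes before answering:

```go
package main

import (
	"log"

	"layeh.com/radius"
)

func main() {
	server := radius.PacketServer{
		Addr:         ":1812", // default RADIUS authentication port
		SecretSource: radius.StaticSecretSource([]byte("placeholder-secret")),
		Handler: radius.HandlerFunc(func(w radius.ResponseWriter, r *radius.Request) {
			// Always reject in this sketch; check credentials here in real code.
			if err := w.Write(r.Response(radius.CodeAccessReject)); err != nil {
				log.Println("write response:", err)
			}
		}),
	}
	log.Fatal(server.ListenAndServe())
}
```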

89
vendor/layeh.com/radius/server.go generated vendored Normal file
View File

@ -0,0 +1,89 @@
package radius
import (
"context"
"errors"
"net"
)
// ErrServerShutdown is returned from server Serve methods when Shutdown
// has been called and handlers are still completing.
var ErrServerShutdown = errors.New("radius: server shutdown")
// Handler handles RADIUS server requests. When a RADIUS request
// is received, ServeRADIUS is called.
type Handler interface {
ServeRADIUS(w ResponseWriter, r *Request)
}
// HandlerFunc allows a function to implement Handler.
type HandlerFunc func(w ResponseWriter, r *Request)
// ServeRADIUS calls h(w, r).
func (h HandlerFunc) ServeRADIUS(w ResponseWriter, r *Request) {
h(w, r)
}
// Request is an incoming RADIUS request that is being handled by the server.
type Request struct {
// LocalAddr is the local address on which the incoming RADIUS request
// was received.
LocalAddr net.Addr
// RemoteAddr is the address from which the incoming RADIUS request
// was sent.
RemoteAddr net.Addr
// Packet is the RADIUS packet sent in the request.
*Packet
ctx context.Context
}
// Context returns the context of the request. If a context has not been set
// using WithContext, the Background context is returned.
func (r *Request) Context() context.Context {
if r.ctx != nil {
return r.ctx
}
return context.Background()
}
// WithContext returns a shallow copy of the request with the new request's
// context set to the given context.
func (r *Request) WithContext(ctx context.Context) *Request {
if ctx == nil {
panic("nil ctx")
}
req := new(Request)
*req = *r
req.ctx = ctx
return req
}
// ResponseWriter is used by RADIUS servers when replying to a RADIUS request.
type ResponseWriter interface {
Write(packet *Packet) error
}
// SecretSource supplies RADIUS servers with the secret that should be used for
// authorizing and decrypting packets.
//
// ctx is canceled if the server's Shutdown method is called.
//
// Returning an empty secret will discard the incoming packet.
type SecretSource interface {
RADIUSSecret(ctx context.Context, remoteAddr net.Addr) ([]byte, error)
}
// StaticSecretSource returns a SecretSource that uses secret for all requests.
func StaticSecretSource(secret []byte) SecretSource {
return &staticSecretSource{secret}
}
type staticSecretSource struct {
secret []byte
}
func (s *staticSecretSource) RADIUSSecret(ctx context.Context, remoteAddr net.Addr) ([]byte, error) {
return s.secret, nil
}
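Since SecretSource is just an interface, a per-client implementation is straightforward. This sketch keys secrets by client IP address (the address and secret entries are invented) and drops packets from unknown peers by returning an error:

```go
package main

import (
	"context"
	"errors"
	"log"
	"net"

	"layeh.com/radius"
)

// ipSecretSource maps client IP addresses to their shared secrets.
type ipSecretSource struct {
	secrets map[string][]byte
}

func (s *ipSecretSource) RADIUSSecret(ctx context.Context, remoteAddr net.Addr) ([]byte, error) {
	host, _, err := net.SplitHostPort(remoteAddr.String())
	if err != nil {
		return nil, err
	}
	if secret, ok := s.secrets[host]; ok {
		return secret, nil
	}
	return nil, errors.New("unknown RADIUS client") // packet will be dropped
}

func main() {
	server := radius.PacketServer{
		SecretSource: &ipSecretSource{secrets: map[string][]byte{
			"192.0.2.10": []byte("secret-for-nas-1"), // illustrative client entry
		}},
		Handler: radius.HandlerFunc(func(w radius.ResponseWriter, r *radius.Request) {
			// Accepting unconditionally is only for illustration.
			_ = w.Write(r.Response(radius.CodeAccessAccept))
		}),
	}
	log.Fatal(server.ListenAndServe())
}
```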

19
vendor/modules.txt vendored Normal file
View File

@ -0,0 +1,19 @@
# github.com/korylprince/go-ad-auth v2.2.0+incompatible
github.com/korylprince/go-ad-auth
# golang.org/x/text v0.3.2
golang.org/x/text/encoding
golang.org/x/text/encoding/internal
golang.org/x/text/encoding/internal/identifier
golang.org/x/text/encoding/unicode
golang.org/x/text/internal/utf8internal
golang.org/x/text/runes
golang.org/x/text/transform
# gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d
gopkg.in/asn1-ber.v1
# gopkg.in/ini.v1 v1.52.0
gopkg.in/ini.v1
# gopkg.in/ldap.v3 v3.1.0
gopkg.in/ldap.v3
# layeh.com/radius v0.0.0-20190322222518-890bc1058917
layeh.com/radius
layeh.com/radius/rfc2865