initial version of weather in golang
parent 26b409a39d
commit afe739bcd0
2  .gitignore  vendored  Normal file
@@ -0,0 +1,2 @@
weather
*.ini
104  functions.go
@@ -1,38 +1,41 @@
package main

import (
    "encoding/json"
    "flag"
    "log"
    "fmt"
    "io/ioutil"
    "net/http"
    "os"
    "time"

    "gopkg.in/ini.v1"
)

// ParseArgs is not yet implemented
func ParseArgs() error {
    return nil
}

// GetConfig fetches the config from an ini file
func GetConfig(configfile string, weatherconfig *WeatherConfig) error {
    flag.Usage = Usage

    config, err := ini.Load(configfile)
    HandleFatalError(err)
    if err != nil {
        return err
    }

    var wc WeatherConfig

    owmSection := config.Section("owm")
    wc.OwmURL = owmSection.Key("url").String()
    wc.OwmAppID = owmSection.Key("appid").String()
    wc.OwmCities = owmSection.Key("cities").Strings(",")
    wc.OwmMeasurements = owmSection.Key("measurements").Strings(",")
    wc.OwmTable = owmSection.Key("table").String()
    weatherSection := config.Section("weather")
    wc.WeatherVersion = weatherSection.Key("version").String()
    wc.WeatherAppID = weatherSection.Key("appid").String()
    wc.WeatherCities = weatherSection.Key("cities").Strings(",")
    wc.WeatherMeasurements = weatherSection.Key("measurements").Strings(",")
    wc.WeatherTable = weatherSection.Key("table").String()

    influxdbSection := config.Section("influxdb")
    wc.InfluxHost = influxdbSection.Key("hostname").String()
    wc.InfluxPort, err = influxdbSection.Key("port").Int()
    HandleError(err)
    if err != nil {
        return err
    }
    wc.InfluxUser = influxdbSection.Key("username").String()
    wc.InfluxPass = influxdbSection.Key("password").String()
    wc.InfluxDB = influxdbSection.Key("database").String()
@@ -42,21 +45,68 @@ func GetConfig(configfile string, weatherconfig *WeatherConfig) error {
    return err
}

// HandleError handles errors to return err
func HandleError(err error) error {
    if err != nil {
        return err
    }
    return nil
}

// FetchData queries the OpenWeatherMap API and returns the weather data for a city.
// wc and kelvin are package-level values defined outside this diff.
func FetchData(city string) (Data, error) {
    var d Data

    // request and idle-connection timeout
    pollTo := 30 * time.Second

    c := &http.Client{Timeout: pollTo, Transport: &http.Transport{
        IdleConnTimeout:    pollTo,
        DisableCompression: false,
    }}

    resp, err := c.Get(fmt.Sprintf("https://api.openweathermap.org/data/%s/weather?q=%s&appid=%s", wc.WeatherVersion, city, wc.WeatherAppID))
    if err != nil {
        return d, err
    }
    defer resp.Body.Close()

    b, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return d, err
    }
    err = json.Unmarshal(b, &d)

    d.Humidity = d.Main["humidity"]
    d.Temperature = kelvin + d.Main["temp"]

    return d, err
}

// HandleFatalError handles fatal errors
func HandleFatalError(err error) {
    if err != nil {
        log.Fatal(err)
        os.Exit(2)
    }
}

// // SendToInflux sends time series data to influxdb
// func SendToInflux(wc *WeatherConfig, data *[]Data) error {
//     httpClient, err := client.NewHTTPClient(client.HTTPConfig{
//         Addr:     fmt.Sprintf("http://%s:%d", wc.InfluxHost, wc.InfluxPort),
//         Username: wc.InfluxUser,
//         Password: wc.InfluxPass,
//     })
//     if err != nil {
//         return err
//     }

//     bp, err := client.NewBatchPoints(client.BatchPointsConfig{
//         Database: wc.InfluxDB,
//     })
//     if err != nil {
//         return err
//     }

//     for _, p := range *data {

//         tags := map[string]string{"city": p.city, "fuel": p.Fuel}
//         fields := map[string]interface{}{"value": p.Value}

//         point, _ := client.NewPoint(
//             wc.Table,
//             tags,
//             fields,
//             time.Now(),
//         )

//         log.Println(point)

//         bp.AddPoint(point)
//         err = httpClient.Write(bp)
//         if err != nil {
//             return err
//         }
//     }

//     return nil
// }

// Usage displays possible arguments
func Usage() {
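GetConfig above expects an ini file with `owm`, `weather`, and `influxdb` sections. A minimal sketch of what such a config could look like; every value here is a placeholder (URL, API key, cities, and credentials are illustrative, not part of this commit):

```ini
[owm]
url = https://api.openweathermap.org/data
appid = your-openweathermap-api-key
cities = Berlin,Hamburg
measurements = temp,humidity
table = weather

[weather]
version = 2.5
appid = your-openweathermap-api-key
cities = Berlin,Hamburg
measurements = temp,humidity
table = weather

[influxdb]
hostname = localhost
port = 8086
username = weather
password = secret
database = weather
```

The section and key names are the ones GetConfig reads; `cities` and `measurements` are parsed as comma-separated lists, and `port` must be an integer. A config named like this is matched by the new `*.ini` rule in .gitignore, which keeps the API key and database credentials out of the repository.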
9  go.mod  Normal file
@@ -0,0 +1,9 @@
module weather

go 1.12

require (
    github.com/influxdata/influxdb1-client v0.0.0-20190402204710-8ff2fc3824fc
    github.com/smartystreets/goconvey v0.0.0-20190710185942-9d28bd7c0945 // indirect
    gopkg.in/ini.v1 v1.44.0
)
17  go.sum  Normal file
@@ -0,0 +1,17 @@
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/influxdata/influxdb1-client v0.0.0-20190402204710-8ff2fc3824fc h1:KpMgaYJRieDkHZJWY3LMafvtqS/U8xX6+lUN+OKpl/Y=
github.com/influxdata/influxdb1-client v0.0.0-20190402204710-8ff2fc3824fc/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20190710185942-9d28bd7c0945 h1:N8Bg45zpk/UcpNGnfJt2y/3lRWASHNTUET8owPYCgYI=
github.com/smartystreets/goconvey v0.0.0-20190710185942-9d28bd7c0945/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
gopkg.in/ini.v1 v1.44.0 h1:YRJzTUp0kSYWUVFF5XAbDFfyiqwsl0Vb9R8TVP5eRi0=
gopkg.in/ini.v1 v1.44.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
28  types.go
@@ -2,14 +2,22 @@ package main

// WeatherConfig is the main configuration
type WeatherConfig struct {
    OwmURL string
    OwmAppID string
    OwmCities []string
    OwmMeasurements []string
    OwmTable string
    InfluxHost string
    InfluxPort int
    InfluxUser string
    InfluxPass string
    InfluxDB string
    WeatherVersion string
    WeatherAppID string
    WeatherCities []string
    WeatherMeasurements []string
    WeatherTable string
    InfluxHost string
    InfluxPort int
    InfluxUser string
    InfluxPass string
    InfluxDB string
}

// Data is the object for temperature and humidity for a city
type Data struct {
    City string `json:"name"`
    Temperature float64
    Humidity float64
    Main map[string]float64
}
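FetchData unmarshals the OpenWeatherMap response straight into Data: `City` is filled from the top-level `name` field via the struct tag, and `Main` collects the numeric values nested under `main`. An abridged example of the payload shape this assumes (values are illustrative, and the real response carries many more fields, which encoding/json simply ignores):

```json
{
  "name": "Berlin",
  "main": {
    "temp": 288.15,
    "humidity": 72
  }
}
```

After unmarshalling, `Humidity` is copied from `Main["humidity"]` and `Temperature` is derived from `Main["temp"]` plus the package-level `kelvin` value, presumably the offset for converting the API's default Kelvin readings.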
21  vendor/github.com/influxdata/influxdb1-client/LICENSE  generated  vendored  Normal file
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2019 InfluxData

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
29  vendor/github.com/influxdata/influxdb1-client/README.md  generated  vendored  Normal file
@@ -0,0 +1,29 @@
# influxdb1-clientv2
influxdb1-clientv2 is the current Go client API for InfluxDB 1.x. A Go client for the 2.0 API will be coming soon.

InfluxDB is an open-source distributed time series database, find more about [InfluxDB](https://www.influxdata.com/time-series-platform/influxdb/) at https://docs.influxdata.com/influxdb/latest

## Usage
To import into your Go project, run the following command in your terminal:
`go get github.com/influxdata/influxdb1-client/v2`
Then, in your import declaration section of your Go file, paste the following:
`import "github.com/influxdata/influxdb1-client/v2"`

## Example
The following example creates a new client to the InfluxDB host on localhost:8086 and runs a query for the measurement `cpu_load` from the `mydb` database.
``` go
func ExampleClient_query() {
    c, err := client.NewHTTPClient(client.HTTPConfig{
        Addr: "http://localhost:8086",
    })
    if err != nil {
        fmt.Println("Error creating InfluxDB Client: ", err.Error())
    }
    defer c.Close()

    q := client.NewQuery("SELECT count(value) FROM cpu_load", "mydb", "")
    if response, err := c.Query(q); err == nil && response.Error() == nil {
        fmt.Println(response.Results)
    }
}
```
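The commented-out SendToInflux draft back in functions.go is written against this v2 API. A minimal sketch of how it could be completed for this project's WeatherConfig and Data types, assuming the `/v2` import from the README above; the tag and field names and the use of `WeatherTable` as the measurement are choices made for this sketch, not something the commit defines:

```go
package main

import (
    "fmt"
    "time"

    influx "github.com/influxdata/influxdb1-client/v2"
)

// SendToInflux writes one point per city with temperature and humidity fields.
func SendToInflux(wc *WeatherConfig, data *[]Data) error {
    httpClient, err := influx.NewHTTPClient(influx.HTTPConfig{
        Addr:     fmt.Sprintf("http://%s:%d", wc.InfluxHost, wc.InfluxPort),
        Username: wc.InfluxUser,
        Password: wc.InfluxPass,
    })
    if err != nil {
        return err
    }
    defer httpClient.Close()

    bp, err := influx.NewBatchPoints(influx.BatchPointsConfig{
        Database:  wc.InfluxDB,
        Precision: "s",
    })
    if err != nil {
        return err
    }

    for _, p := range *data {
        tags := map[string]string{"city": p.City}
        fields := map[string]interface{}{
            "temperature": p.Temperature,
            "humidity":    p.Humidity,
        }

        point, err := influx.NewPoint(wc.WeatherTable, tags, fields, time.Now())
        if err != nil {
            return err
        }
        bp.AddPoint(point)
    }

    // Send the whole batch in one write instead of once per point.
    return httpClient.Write(bp)
}
```

Unlike the draft, the point fields come from `Temperature` and `Humidity` (the draft's `fuel` and `value` fields do not exist on `Data`), and the batch is written once after the loop rather than on every iteration.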
870  vendor/github.com/influxdata/influxdb1-client/influxdb.go  generated  vendored  Normal file
@@ -0,0 +1,870 @@
|
||||
// Package client implements a now-deprecated client for InfluxDB;
|
||||
// use github.com/influxdata/influxdb1-client/v2 instead.
|
||||
package client // import "github.com/influxdata/influxdb1-client"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/influxdb1-client/models"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultHost is the default host used to connect to an InfluxDB instance
|
||||
DefaultHost = "localhost"
|
||||
|
||||
// DefaultPort is the default port used to connect to an InfluxDB instance
|
||||
DefaultPort = 8086
|
||||
|
||||
// DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance
|
||||
DefaultTimeout = 0
|
||||
)
|
||||
|
||||
// Query is used to send a command to the server. Both Command and Database are required.
|
||||
type Query struct {
|
||||
Command string
|
||||
Database string
|
||||
|
||||
// RetentionPolicy tells the server which retention policy to use by default.
|
||||
// This option is only effective when querying a server of version 1.6.0 or later.
|
||||
RetentionPolicy string
|
||||
|
||||
// Chunked tells the server to send back chunked responses. This places
|
||||
// less load on the server by sending back chunks of the response rather
|
||||
// than waiting for the entire response all at once.
|
||||
Chunked bool
|
||||
|
||||
// ChunkSize sets the maximum number of rows that will be returned per
|
||||
// chunk. Chunks are either divided based on their series or if they hit
|
||||
// the chunk size limit.
|
||||
//
|
||||
// Chunked must be set to true for this option to be used.
|
||||
ChunkSize int
|
||||
|
||||
// NodeID sets the data node to use for the query results. This option only
|
||||
// has any effect in the enterprise version of the software where there can be
|
||||
// more than one data node and is primarily useful for analyzing differences in
|
||||
// data. The default behavior is to automatically select the appropriate data
|
||||
// nodes to retrieve all of the data. On a database where the number of data nodes
|
||||
// is greater than the replication factor, it is expected that setting this option
|
||||
// will only retrieve partial data.
|
||||
NodeID int
|
||||
}
|
||||
|
||||
// ParseConnectionString will parse a string to create a valid connection URL
|
||||
func ParseConnectionString(path string, ssl bool) (url.URL, error) {
|
||||
var host string
|
||||
var port int
|
||||
|
||||
h, p, err := net.SplitHostPort(path)
|
||||
if err != nil {
|
||||
if path == "" {
|
||||
host = DefaultHost
|
||||
} else {
|
||||
host = path
|
||||
}
|
||||
// If they didn't specify a port, always use the default port
|
||||
port = DefaultPort
|
||||
} else {
|
||||
host = h
|
||||
port, err = strconv.Atoi(p)
|
||||
if err != nil {
|
||||
return url.URL{}, fmt.Errorf("invalid port number %q: %s\n", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
u := url.URL{
|
||||
Scheme: "http",
|
||||
Host: host,
|
||||
}
|
||||
if ssl {
|
||||
u.Scheme = "https"
|
||||
if port != 443 {
|
||||
u.Host = net.JoinHostPort(host, strconv.Itoa(port))
|
||||
}
|
||||
} else if port != 80 {
|
||||
u.Host = net.JoinHostPort(host, strconv.Itoa(port))
|
||||
}
|
||||
|
||||
return u, nil
|
||||
}
|
||||
|
||||
// Config is used to specify what server to connect to.
|
||||
// URL: The URL of the server connecting to.
|
||||
// Username/Password are optional. They will be passed via basic auth if provided.
|
||||
// UserAgent: If not provided, will default "InfluxDBClient",
|
||||
// Timeout: If not provided, will default to 0 (no timeout)
|
||||
type Config struct {
|
||||
URL url.URL
|
||||
UnixSocket string
|
||||
Username string
|
||||
Password string
|
||||
UserAgent string
|
||||
Timeout time.Duration
|
||||
Precision string
|
||||
WriteConsistency string
|
||||
UnsafeSsl bool
|
||||
Proxy func(req *http.Request) (*url.URL, error)
|
||||
TLS *tls.Config
|
||||
}
|
||||
|
||||
// NewConfig will create a config to be used in connecting to the client
|
||||
func NewConfig() Config {
|
||||
return Config{
|
||||
Timeout: DefaultTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
// Client is used to make calls to the server.
|
||||
type Client struct {
|
||||
url url.URL
|
||||
unixSocket string
|
||||
username string
|
||||
password string
|
||||
httpClient *http.Client
|
||||
userAgent string
|
||||
precision string
|
||||
}
|
||||
|
||||
const (
|
||||
// ConsistencyOne requires at least one data node acknowledged a write.
|
||||
ConsistencyOne = "one"
|
||||
|
||||
// ConsistencyAll requires all data nodes to acknowledge a write.
|
||||
ConsistencyAll = "all"
|
||||
|
||||
// ConsistencyQuorum requires a quorum of data nodes to acknowledge a write.
|
||||
ConsistencyQuorum = "quorum"
|
||||
|
||||
// ConsistencyAny allows for hinted hand off, potentially no write happened yet.
|
||||
ConsistencyAny = "any"
|
||||
)
|
||||
|
||||
// NewClient will instantiate and return a connected client to issue commands to the server.
|
||||
func NewClient(c Config) (*Client, error) {
|
||||
tlsConfig := new(tls.Config)
|
||||
if c.TLS != nil {
|
||||
tlsConfig = c.TLS.Clone()
|
||||
}
|
||||
tlsConfig.InsecureSkipVerify = c.UnsafeSsl
|
||||
|
||||
tr := &http.Transport{
|
||||
Proxy: c.Proxy,
|
||||
TLSClientConfig: tlsConfig,
|
||||
}
|
||||
|
||||
if c.UnixSocket != "" {
|
||||
// No need for compression in local communications.
|
||||
tr.DisableCompression = true
|
||||
|
||||
tr.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {
|
||||
return net.Dial("unix", c.UnixSocket)
|
||||
}
|
||||
}
|
||||
|
||||
client := Client{
|
||||
url: c.URL,
|
||||
unixSocket: c.UnixSocket,
|
||||
username: c.Username,
|
||||
password: c.Password,
|
||||
httpClient: &http.Client{Timeout: c.Timeout, Transport: tr},
|
||||
userAgent: c.UserAgent,
|
||||
precision: c.Precision,
|
||||
}
|
||||
if client.userAgent == "" {
|
||||
client.userAgent = "InfluxDBClient"
|
||||
}
|
||||
return &client, nil
|
||||
}
|
||||
|
||||
// SetAuth will update the username and passwords
|
||||
func (c *Client) SetAuth(u, p string) {
|
||||
c.username = u
|
||||
c.password = p
|
||||
}
|
||||
|
||||
// SetPrecision will update the precision
|
||||
func (c *Client) SetPrecision(precision string) {
|
||||
c.precision = precision
|
||||
}
|
||||
|
||||
// Query sends a command to the server and returns the Response
|
||||
func (c *Client) Query(q Query) (*Response, error) {
|
||||
return c.QueryContext(context.Background(), q)
|
||||
}
|
||||
|
||||
// QueryContext sends a command to the server and returns the Response
|
||||
// It uses a context that can be cancelled by the command line client
|
||||
func (c *Client) QueryContext(ctx context.Context, q Query) (*Response, error) {
|
||||
u := c.url
|
||||
u.Path = path.Join(u.Path, "query")
|
||||
|
||||
values := u.Query()
|
||||
values.Set("q", q.Command)
|
||||
values.Set("db", q.Database)
|
||||
if q.RetentionPolicy != "" {
|
||||
values.Set("rp", q.RetentionPolicy)
|
||||
}
|
||||
if q.Chunked {
|
||||
values.Set("chunked", "true")
|
||||
if q.ChunkSize > 0 {
|
||||
values.Set("chunk_size", strconv.Itoa(q.ChunkSize))
|
||||
}
|
||||
}
|
||||
if q.NodeID > 0 {
|
||||
values.Set("node_id", strconv.Itoa(q.NodeID))
|
||||
}
|
||||
if c.precision != "" {
|
||||
values.Set("epoch", c.precision)
|
||||
}
|
||||
u.RawQuery = values.Encode()
|
||||
|
||||
req, err := http.NewRequest("POST", u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("User-Agent", c.userAgent)
|
||||
if c.username != "" {
|
||||
req.SetBasicAuth(c.username, c.password)
|
||||
}
|
||||
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
resp, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var response Response
|
||||
if q.Chunked {
|
||||
cr := NewChunkedResponse(resp.Body)
|
||||
for {
|
||||
r, err := cr.NextResponse()
|
||||
if err != nil {
|
||||
// If we got an error while decoding the response, send that back.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if r == nil {
|
||||
break
|
||||
}
|
||||
|
||||
response.Results = append(response.Results, r.Results...)
|
||||
if r.Err != nil {
|
||||
response.Err = r.Err
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
dec := json.NewDecoder(resp.Body)
|
||||
dec.UseNumber()
|
||||
if err := dec.Decode(&response); err != nil {
|
||||
// Ignore EOF errors if we got an invalid status code.
|
||||
if !(err == io.EOF && resp.StatusCode != http.StatusOK) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If we don't have an error in our json response, and didn't get StatusOK,
|
||||
// then send back an error.
|
||||
if resp.StatusCode != http.StatusOK && response.Error() == nil {
|
||||
return &response, fmt.Errorf("received status code %d from server", resp.StatusCode)
|
||||
}
|
||||
return &response, nil
|
||||
}
|
||||
|
||||
// Write takes BatchPoints and allows for writing of multiple points with defaults
|
||||
// If successful, error is nil and Response is nil
|
||||
// If an error occurs, Response may contain additional information if populated.
|
||||
func (c *Client) Write(bp BatchPoints) (*Response, error) {
|
||||
u := c.url
|
||||
u.Path = path.Join(u.Path, "write")
|
||||
|
||||
var b bytes.Buffer
|
||||
for _, p := range bp.Points {
|
||||
err := checkPointTypes(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if p.Raw != "" {
|
||||
if _, err := b.WriteString(p.Raw); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
for k, v := range bp.Tags {
|
||||
if p.Tags == nil {
|
||||
p.Tags = make(map[string]string, len(bp.Tags))
|
||||
}
|
||||
p.Tags[k] = v
|
||||
}
|
||||
|
||||
if _, err := b.WriteString(p.MarshalString()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := b.WriteByte('\n'); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", u.String(), &b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "")
|
||||
req.Header.Set("User-Agent", c.userAgent)
|
||||
if c.username != "" {
|
||||
req.SetBasicAuth(c.username, c.password)
|
||||
}
|
||||
|
||||
precision := bp.Precision
|
||||
if precision == "" {
|
||||
precision = c.precision
|
||||
}
|
||||
|
||||
params := req.URL.Query()
|
||||
params.Set("db", bp.Database)
|
||||
params.Set("rp", bp.RetentionPolicy)
|
||||
params.Set("precision", precision)
|
||||
params.Set("consistency", bp.WriteConsistency)
|
||||
req.URL.RawQuery = params.Encode()
|
||||
|
||||
resp, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var response Response
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
|
||||
var err = fmt.Errorf(string(body))
|
||||
response.Err = err
|
||||
return &response, err
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// WriteLineProtocol takes a string with line returns to delimit each write
|
||||
// If successful, error is nil and Response is nil
|
||||
// If an error occurs, Response may contain additional information if populated.
|
||||
func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) {
|
||||
u := c.url
|
||||
u.Path = path.Join(u.Path, "write")
|
||||
|
||||
r := strings.NewReader(data)
|
||||
|
||||
req, err := http.NewRequest("POST", u.String(), r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "")
|
||||
req.Header.Set("User-Agent", c.userAgent)
|
||||
if c.username != "" {
|
||||
req.SetBasicAuth(c.username, c.password)
|
||||
}
|
||||
params := req.URL.Query()
|
||||
params.Set("db", database)
|
||||
params.Set("rp", retentionPolicy)
|
||||
params.Set("precision", precision)
|
||||
params.Set("consistency", writeConsistency)
|
||||
req.URL.RawQuery = params.Encode()
|
||||
|
||||
resp, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var response Response
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
|
||||
err := fmt.Errorf(string(body))
|
||||
response.Err = err
|
||||
return &response, err
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Ping will check to see if the server is up
|
||||
// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
|
||||
func (c *Client) Ping() (time.Duration, string, error) {
|
||||
now := time.Now()
|
||||
|
||||
u := c.url
|
||||
u.Path = path.Join(u.Path, "ping")
|
||||
|
||||
req, err := http.NewRequest("GET", u.String(), nil)
|
||||
if err != nil {
|
||||
return 0, "", err
|
||||
}
|
||||
req.Header.Set("User-Agent", c.userAgent)
|
||||
if c.username != "" {
|
||||
req.SetBasicAuth(c.username, c.password)
|
||||
}
|
||||
|
||||
resp, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return 0, "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
version := resp.Header.Get("X-Influxdb-Version")
|
||||
return time.Since(now), version, nil
|
||||
}
|
||||
|
||||
// Structs
|
||||
|
||||
// Message represents a user message.
|
||||
type Message struct {
|
||||
Level string `json:"level,omitempty"`
|
||||
Text string `json:"text,omitempty"`
|
||||
}
|
||||
|
||||
// Result represents a resultset returned from a single statement.
|
||||
type Result struct {
|
||||
Series []models.Row
|
||||
Messages []*Message
|
||||
Err error
|
||||
}
|
||||
|
||||
// MarshalJSON encodes the result into JSON.
|
||||
func (r *Result) MarshalJSON() ([]byte, error) {
|
||||
// Define a struct that outputs "error" as a string.
|
||||
var o struct {
|
||||
Series []models.Row `json:"series,omitempty"`
|
||||
Messages []*Message `json:"messages,omitempty"`
|
||||
Err string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// Copy fields to output struct.
|
||||
o.Series = r.Series
|
||||
o.Messages = r.Messages
|
||||
if r.Err != nil {
|
||||
o.Err = r.Err.Error()
|
||||
}
|
||||
|
||||
return json.Marshal(&o)
|
||||
}
|
||||
|
||||
// UnmarshalJSON decodes the data into the Result struct
|
||||
func (r *Result) UnmarshalJSON(b []byte) error {
|
||||
var o struct {
|
||||
Series []models.Row `json:"series,omitempty"`
|
||||
Messages []*Message `json:"messages,omitempty"`
|
||||
Err string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
dec := json.NewDecoder(bytes.NewBuffer(b))
|
||||
dec.UseNumber()
|
||||
err := dec.Decode(&o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Series = o.Series
|
||||
r.Messages = o.Messages
|
||||
if o.Err != "" {
|
||||
r.Err = errors.New(o.Err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Response represents a list of statement results.
|
||||
type Response struct {
|
||||
Results []Result
|
||||
Err error
|
||||
}
|
||||
|
||||
// MarshalJSON encodes the response into JSON.
|
||||
func (r *Response) MarshalJSON() ([]byte, error) {
|
||||
// Define a struct that outputs "error" as a string.
|
||||
var o struct {
|
||||
Results []Result `json:"results,omitempty"`
|
||||
Err string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// Copy fields to output struct.
|
||||
o.Results = r.Results
|
||||
if r.Err != nil {
|
||||
o.Err = r.Err.Error()
|
||||
}
|
||||
|
||||
return json.Marshal(&o)
|
||||
}
|
||||
|
||||
// UnmarshalJSON decodes the data into the Response struct
|
||||
func (r *Response) UnmarshalJSON(b []byte) error {
|
||||
var o struct {
|
||||
Results []Result `json:"results,omitempty"`
|
||||
Err string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
dec := json.NewDecoder(bytes.NewBuffer(b))
|
||||
dec.UseNumber()
|
||||
err := dec.Decode(&o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Results = o.Results
|
||||
if o.Err != "" {
|
||||
r.Err = errors.New(o.Err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Error returns the first error from any statement.
|
||||
// Returns nil if no errors occurred on any statements.
|
||||
func (r *Response) Error() error {
|
||||
if r.Err != nil {
|
||||
return r.Err
|
||||
}
|
||||
for _, result := range r.Results {
|
||||
if result.Err != nil {
|
||||
return result.Err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// duplexReader reads responses and writes it to another writer while
|
||||
// satisfying the reader interface.
|
||||
type duplexReader struct {
|
||||
r io.Reader
|
||||
w io.Writer
|
||||
}
|
||||
|
||||
func (r *duplexReader) Read(p []byte) (n int, err error) {
|
||||
n, err = r.r.Read(p)
|
||||
if err == nil {
|
||||
r.w.Write(p[:n])
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// ChunkedResponse represents a response from the server that
|
||||
// uses chunking to stream the output.
|
||||
type ChunkedResponse struct {
|
||||
dec *json.Decoder
|
||||
duplex *duplexReader
|
||||
buf bytes.Buffer
|
||||
}
|
||||
|
||||
// NewChunkedResponse reads a stream and produces responses from the stream.
|
||||
func NewChunkedResponse(r io.Reader) *ChunkedResponse {
|
||||
resp := &ChunkedResponse{}
|
||||
resp.duplex = &duplexReader{r: r, w: &resp.buf}
|
||||
resp.dec = json.NewDecoder(resp.duplex)
|
||||
resp.dec.UseNumber()
|
||||
return resp
|
||||
}
|
||||
|
||||
// NextResponse reads the next line of the stream and returns a response.
|
||||
func (r *ChunkedResponse) NextResponse() (*Response, error) {
|
||||
var response Response
|
||||
if err := r.dec.Decode(&response); err != nil {
|
||||
if err == io.EOF {
|
||||
return nil, nil
|
||||
}
|
||||
// A decoding error happened. This probably means the server crashed
|
||||
// and sent a last-ditch error message to us. Ensure we have read the
|
||||
// entirety of the connection to get any remaining error text.
|
||||
io.Copy(ioutil.Discard, r.duplex)
|
||||
return nil, errors.New(strings.TrimSpace(r.buf.String()))
|
||||
}
|
||||
r.buf.Reset()
|
||||
return &response, nil
|
||||
}
|
||||
|
||||
// Point defines the fields that will be written to the database
|
||||
// Measurement, Time, and Fields are required
|
||||
// Precision can be specified if the time is in epoch format (integer).
|
||||
// Valid values for Precision are n, u, ms, s, m, and h
|
||||
type Point struct {
|
||||
Measurement string
|
||||
Tags map[string]string
|
||||
Time time.Time
|
||||
Fields map[string]interface{}
|
||||
Precision string
|
||||
Raw string
|
||||
}
|
||||
|
||||
// MarshalJSON will format the time in RFC3339Nano
|
||||
// Precision is also ignored as it is only used for writing, not reading
|
||||
// Or another way to say it is we always send back in nanosecond precision
|
||||
func (p *Point) MarshalJSON() ([]byte, error) {
|
||||
point := struct {
|
||||
Measurement string `json:"measurement,omitempty"`
|
||||
Tags map[string]string `json:"tags,omitempty"`
|
||||
Time string `json:"time,omitempty"`
|
||||
Fields map[string]interface{} `json:"fields,omitempty"`
|
||||
Precision string `json:"precision,omitempty"`
|
||||
}{
|
||||
Measurement: p.Measurement,
|
||||
Tags: p.Tags,
|
||||
Fields: p.Fields,
|
||||
Precision: p.Precision,
|
||||
}
|
||||
// Let it omit empty if it's really zero
|
||||
if !p.Time.IsZero() {
|
||||
point.Time = p.Time.UTC().Format(time.RFC3339Nano)
|
||||
}
|
||||
return json.Marshal(&point)
|
||||
}
|
||||
|
||||
// MarshalString renders string representation of a Point with specified
|
||||
// precision. The default precision is nanoseconds.
|
||||
func (p *Point) MarshalString() string {
|
||||
pt, err := models.NewPoint(p.Measurement, models.NewTags(p.Tags), p.Fields, p.Time)
|
||||
if err != nil {
|
||||
return "# ERROR: " + err.Error() + " " + p.Measurement
|
||||
}
|
||||
if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" {
|
||||
return pt.String()
|
||||
}
|
||||
return pt.PrecisionString(p.Precision)
|
||||
}
|
||||
|
||||
// UnmarshalJSON decodes the data into the Point struct
|
||||
func (p *Point) UnmarshalJSON(b []byte) error {
|
||||
var normal struct {
|
||||
Measurement string `json:"measurement"`
|
||||
Tags map[string]string `json:"tags"`
|
||||
Time time.Time `json:"time"`
|
||||
Precision string `json:"precision"`
|
||||
Fields map[string]interface{} `json:"fields"`
|
||||
}
|
||||
var epoch struct {
|
||||
Measurement string `json:"measurement"`
|
||||
Tags map[string]string `json:"tags"`
|
||||
Time *int64 `json:"time"`
|
||||
Precision string `json:"precision"`
|
||||
Fields map[string]interface{} `json:"fields"`
|
||||
}
|
||||
|
||||
if err := func() error {
|
||||
var err error
|
||||
dec := json.NewDecoder(bytes.NewBuffer(b))
|
||||
dec.UseNumber()
|
||||
if err = dec.Decode(&epoch); err != nil {
|
||||
return err
|
||||
}
|
||||
// Convert from epoch to time.Time, but only if Time
|
||||
// was actually set.
|
||||
var ts time.Time
|
||||
if epoch.Time != nil {
|
||||
ts, err = EpochToTime(*epoch.Time, epoch.Precision)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
p.Measurement = epoch.Measurement
|
||||
p.Tags = epoch.Tags
|
||||
p.Time = ts
|
||||
p.Precision = epoch.Precision
|
||||
p.Fields = normalizeFields(epoch.Fields)
|
||||
return nil
|
||||
}(); err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
dec := json.NewDecoder(bytes.NewBuffer(b))
|
||||
dec.UseNumber()
|
||||
if err := dec.Decode(&normal); err != nil {
|
||||
return err
|
||||
}
|
||||
normal.Time = SetPrecision(normal.Time, normal.Precision)
|
||||
p.Measurement = normal.Measurement
|
||||
p.Tags = normal.Tags
|
||||
p.Time = normal.Time
|
||||
p.Precision = normal.Precision
|
||||
p.Fields = normalizeFields(normal.Fields)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove any notion of json.Number
|
||||
func normalizeFields(fields map[string]interface{}) map[string]interface{} {
|
||||
newFields := map[string]interface{}{}
|
||||
|
||||
for k, v := range fields {
|
||||
switch v := v.(type) {
|
||||
case json.Number:
|
||||
jv, e := v.Float64()
|
||||
if e != nil {
|
||||
panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e))
|
||||
}
|
||||
newFields[k] = jv
|
||||
default:
|
||||
newFields[k] = v
|
||||
}
|
||||
}
|
||||
return newFields
|
||||
}
|
||||
|
||||
// BatchPoints is used to send batched data in a single write.
|
||||
// Database and Points are required
|
||||
// If no retention policy is specified, it will use the databases default retention policy.
|
||||
// If tags are specified, they will be "merged" with all points. If a point already has that tag, it will be ignored.
|
||||
// If time is specified, it will be applied to any point with an empty time.
|
||||
// Precision can be specified if the time is in epoch format (integer).
|
||||
// Valid values for Precision are n, u, ms, s, m, and h
|
||||
type BatchPoints struct {
|
||||
Points []Point `json:"points,omitempty"`
|
||||
Database string `json:"database,omitempty"`
|
||||
RetentionPolicy string `json:"retentionPolicy,omitempty"`
|
||||
Tags map[string]string `json:"tags,omitempty"`
|
||||
Time time.Time `json:"time,omitempty"`
|
||||
Precision string `json:"precision,omitempty"`
|
||||
WriteConsistency string `json:"-"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON decodes the data into the BatchPoints struct
|
||||
func (bp *BatchPoints) UnmarshalJSON(b []byte) error {
|
||||
var normal struct {
|
||||
Points []Point `json:"points"`
|
||||
Database string `json:"database"`
|
||||
RetentionPolicy string `json:"retentionPolicy"`
|
||||
Tags map[string]string `json:"tags"`
|
||||
Time time.Time `json:"time"`
|
||||
Precision string `json:"precision"`
|
||||
}
|
||||
var epoch struct {
|
||||
Points []Point `json:"points"`
|
||||
Database string `json:"database"`
|
||||
RetentionPolicy string `json:"retentionPolicy"`
|
||||
Tags map[string]string `json:"tags"`
|
||||
Time *int64 `json:"time"`
|
||||
Precision string `json:"precision"`
|
||||
}
|
||||
|
||||
if err := func() error {
|
||||
var err error
|
||||
if err = json.Unmarshal(b, &epoch); err != nil {
|
||||
return err
|
||||
}
|
||||
// Convert from epoch to time.Time
|
||||
var ts time.Time
|
||||
if epoch.Time != nil {
|
||||
ts, err = EpochToTime(*epoch.Time, epoch.Precision)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
bp.Points = epoch.Points
|
||||
bp.Database = epoch.Database
|
||||
bp.RetentionPolicy = epoch.RetentionPolicy
|
||||
bp.Tags = epoch.Tags
|
||||
bp.Time = ts
|
||||
bp.Precision = epoch.Precision
|
||||
return nil
|
||||
}(); err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(b, &normal); err != nil {
|
||||
return err
|
||||
}
|
||||
normal.Time = SetPrecision(normal.Time, normal.Precision)
|
||||
bp.Points = normal.Points
|
||||
bp.Database = normal.Database
|
||||
bp.RetentionPolicy = normal.RetentionPolicy
|
||||
bp.Tags = normal.Tags
|
||||
bp.Time = normal.Time
|
||||
bp.Precision = normal.Precision
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// utility functions
|
||||
|
||||
// Addr provides the current url as a string of the server the client is connected to.
|
||||
func (c *Client) Addr() string {
|
||||
if c.unixSocket != "" {
|
||||
return c.unixSocket
|
||||
}
|
||||
return c.url.String()
|
||||
}
|
||||
|
||||
// checkPointTypes ensures no unsupported types are submitted to influxdb, returning error if they are found.
|
||||
func checkPointTypes(p Point) error {
|
||||
for _, v := range p.Fields {
|
||||
switch v.(type) {
|
||||
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool, string, nil:
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("unsupported point type: %T", v)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// helper functions
|
||||
|
||||
// EpochToTime takes a unix epoch time and uses precision to return back a time.Time
|
||||
func EpochToTime(epoch int64, precision string) (time.Time, error) {
|
||||
if precision == "" {
|
||||
precision = "s"
|
||||
}
|
||||
var t time.Time
|
||||
switch precision {
|
||||
case "h":
|
||||
t = time.Unix(0, epoch*int64(time.Hour))
|
||||
case "m":
|
||||
t = time.Unix(0, epoch*int64(time.Minute))
|
||||
case "s":
|
||||
t = time.Unix(0, epoch*int64(time.Second))
|
||||
case "ms":
|
||||
t = time.Unix(0, epoch*int64(time.Millisecond))
|
||||
case "u":
|
||||
t = time.Unix(0, epoch*int64(time.Microsecond))
|
||||
case "n":
|
||||
t = time.Unix(0, epoch)
|
||||
default:
|
||||
return time.Time{}, fmt.Errorf("Unknown precision %q", precision)
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// SetPrecision will round a time to the specified precision
|
||||
func SetPrecision(t time.Time, precision string) time.Time {
|
||||
switch precision {
|
||||
case "n":
|
||||
case "u":
|
||||
return t.Round(time.Microsecond)
|
||||
case "ms":
|
||||
return t.Round(time.Millisecond)
|
||||
case "s":
|
||||
return t.Round(time.Second)
|
||||
case "m":
|
||||
return t.Round(time.Minute)
|
||||
case "h":
|
||||
return t.Round(time.Hour)
|
||||
}
|
||||
return t
|
||||
}
|
32  vendor/github.com/influxdata/influxdb1-client/models/inline_fnv.go  generated  vendored  Normal file
@@ -0,0 +1,32 @@
|
||||
package models // import "github.com/influxdata/influxdb1-client/models"
|
||||
|
||||
// from stdlib hash/fnv/fnv.go
|
||||
const (
|
||||
prime64 = 1099511628211
|
||||
offset64 = 14695981039346656037
|
||||
)
|
||||
|
||||
// InlineFNV64a is an alloc-free port of the standard library's fnv64a.
|
||||
// See https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function.
|
||||
type InlineFNV64a uint64
|
||||
|
||||
// NewInlineFNV64a returns a new instance of InlineFNV64a.
|
||||
func NewInlineFNV64a() InlineFNV64a {
|
||||
return offset64
|
||||
}
|
||||
|
||||
// Write adds data to the running hash.
|
||||
func (s *InlineFNV64a) Write(data []byte) (int, error) {
|
||||
hash := uint64(*s)
|
||||
for _, c := range data {
|
||||
hash ^= uint64(c)
|
||||
hash *= prime64
|
||||
}
|
||||
*s = InlineFNV64a(hash)
|
||||
return len(data), nil
|
||||
}
|
||||
|
||||
// Sum64 returns the uint64 of the current resulting hash.
|
||||
func (s *InlineFNV64a) Sum64() uint64 {
|
||||
return uint64(*s)
|
||||
}
|
44  vendor/github.com/influxdata/influxdb1-client/models/inline_strconv_parse.go  generated  vendored  Normal file
@@ -0,0 +1,44 @@
|
||||
package models // import "github.com/influxdata/influxdb1-client/models"
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strconv"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt.
|
||||
func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) {
|
||||
s := unsafeBytesToString(b)
|
||||
return strconv.ParseInt(s, base, bitSize)
|
||||
}
|
||||
|
||||
// parseUintBytes is a zero-alloc wrapper around strconv.ParseUint.
|
||||
func parseUintBytes(b []byte, base int, bitSize int) (i uint64, err error) {
|
||||
s := unsafeBytesToString(b)
|
||||
return strconv.ParseUint(s, base, bitSize)
|
||||
}
|
||||
|
||||
// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat.
|
||||
func parseFloatBytes(b []byte, bitSize int) (float64, error) {
|
||||
s := unsafeBytesToString(b)
|
||||
return strconv.ParseFloat(s, bitSize)
|
||||
}
|
||||
|
||||
// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool.
|
||||
func parseBoolBytes(b []byte) (bool, error) {
|
||||
return strconv.ParseBool(unsafeBytesToString(b))
|
||||
}
|
||||
|
||||
// unsafeBytesToString converts a []byte to a string without a heap allocation.
|
||||
//
|
||||
// It is unsafe, and is intended to prepare input to short-lived functions
|
||||
// that require strings.
|
||||
func unsafeBytesToString(in []byte) string {
|
||||
src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
|
||||
dst := reflect.StringHeader{
|
||||
Data: src.Data,
|
||||
Len: src.Len,
|
||||
}
|
||||
s := *(*string)(unsafe.Pointer(&dst))
|
||||
return s
|
||||
}
|
2413  vendor/github.com/influxdata/influxdb1-client/models/points.go  generated  vendored  Normal file
File diff suppressed because it is too large
62  vendor/github.com/influxdata/influxdb1-client/models/rows.go  generated  vendored  Normal file
@@ -0,0 +1,62 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Row represents a single row returned from the execution of a statement.
|
||||
type Row struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
Tags map[string]string `json:"tags,omitempty"`
|
||||
Columns []string `json:"columns,omitempty"`
|
||||
Values [][]interface{} `json:"values,omitempty"`
|
||||
Partial bool `json:"partial,omitempty"`
|
||||
}
|
||||
|
||||
// SameSeries returns true if r contains values for the same series as o.
|
||||
func (r *Row) SameSeries(o *Row) bool {
|
||||
return r.tagsHash() == o.tagsHash() && r.Name == o.Name
|
||||
}
|
||||
|
||||
// tagsHash returns a hash of tag key/value pairs.
|
||||
func (r *Row) tagsHash() uint64 {
|
||||
h := NewInlineFNV64a()
|
||||
keys := r.tagsKeys()
|
||||
for _, k := range keys {
|
||||
h.Write([]byte(k))
|
||||
h.Write([]byte(r.Tags[k]))
|
||||
}
|
||||
return h.Sum64()
|
||||
}
|
||||
|
||||
// tagKeys returns a sorted list of tag keys.
|
||||
func (r *Row) tagsKeys() []string {
|
||||
a := make([]string, 0, len(r.Tags))
|
||||
for k := range r.Tags {
|
||||
a = append(a, k)
|
||||
}
|
||||
sort.Strings(a)
|
||||
return a
|
||||
}
|
||||
|
||||
// Rows represents a collection of rows. Rows implements sort.Interface.
|
||||
type Rows []*Row
|
||||
|
||||
// Len implements sort.Interface.
|
||||
func (p Rows) Len() int { return len(p) }
|
||||
|
||||
// Less implements sort.Interface.
|
||||
func (p Rows) Less(i, j int) bool {
|
||||
// Sort by name first.
|
||||
if p[i].Name != p[j].Name {
|
||||
return p[i].Name < p[j].Name
|
||||
}
|
||||
|
||||
// Sort by tag set hash. Tags don't have a meaningful sort order so we
|
||||
// just compute a hash and sort by that instead. This allows the tests
|
||||
// to receive rows in a predictable order every time.
|
||||
return p[i].tagsHash() < p[j].tagsHash()
|
||||
}
|
||||
|
||||
// Swap implements sort.Interface.
|
||||
func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
42  vendor/github.com/influxdata/influxdb1-client/models/statistic.go  generated  vendored  Normal file
@@ -0,0 +1,42 @@
|
||||
package models
|
||||
|
||||
// Statistic is the representation of a statistic used by the monitoring service.
|
||||
type Statistic struct {
|
||||
Name string `json:"name"`
|
||||
Tags map[string]string `json:"tags"`
|
||||
Values map[string]interface{} `json:"values"`
|
||||
}
|
||||
|
||||
// NewStatistic returns an initialized Statistic.
|
||||
func NewStatistic(name string) Statistic {
|
||||
return Statistic{
|
||||
Name: name,
|
||||
Tags: make(map[string]string),
|
||||
Values: make(map[string]interface{}),
|
||||
}
|
||||
}
|
||||
|
||||
// StatisticTags is a map that can be merged with others without causing
|
||||
// mutations to either map.
|
||||
type StatisticTags map[string]string
|
||||
|
||||
// Merge creates a new map containing the merged contents of tags and t.
|
||||
// If both tags and the receiver map contain the same key, the value in tags
|
||||
// is used in the resulting map.
|
||||
//
|
||||
// Merge always returns a usable map.
|
||||
func (t StatisticTags) Merge(tags map[string]string) map[string]string {
|
||||
// Add everything in tags to the result.
|
||||
out := make(map[string]string, len(tags))
|
||||
for k, v := range tags {
|
||||
out[k] = v
|
||||
}
|
||||
|
||||
// Only add values from t that don't appear in tags.
|
||||
for k, v := range t {
|
||||
if _, ok := tags[k]; !ok {
|
||||
out[k] = v
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
74  vendor/github.com/influxdata/influxdb1-client/models/time.go  generated  vendored  Normal file
@@ -0,0 +1,74 @@
|
||||
package models
|
||||
|
||||
// Helper time methods since parsing time can easily overflow and we only support a
|
||||
// specific time range.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// MinNanoTime is the minumum time that can be represented.
|
||||
//
|
||||
// 1677-09-21 00:12:43.145224194 +0000 UTC
|
||||
//
|
||||
// The two lowest minimum integers are used as sentinel values. The
|
||||
// minimum value needs to be used as a value lower than any other value for
|
||||
// comparisons and another separate value is needed to act as a sentinel
|
||||
// default value that is unusable by the user, but usable internally.
|
||||
// Because these two values need to be used for a special purpose, we do
|
||||
// not allow users to write points at these two times.
|
||||
MinNanoTime = int64(math.MinInt64) + 2
|
||||
|
||||
// MaxNanoTime is the maximum time that can be represented.
|
||||
//
|
||||
// 2262-04-11 23:47:16.854775806 +0000 UTC
|
||||
//
|
||||
// The highest time represented by a nanosecond needs to be used for an
|
||||
// exclusive range in the shard group, so the maximum time needs to be one
|
||||
// less than the possible maximum number of nanoseconds representable by an
|
||||
// int64 so that we don't lose a point at that one time.
|
||||
MaxNanoTime = int64(math.MaxInt64) - 1
|
||||
)
|
||||
|
||||
var (
|
||||
minNanoTime = time.Unix(0, MinNanoTime).UTC()
|
||||
maxNanoTime = time.Unix(0, MaxNanoTime).UTC()
|
||||
|
||||
// ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch.
|
||||
ErrTimeOutOfRange = fmt.Errorf("time outside range %d - %d", MinNanoTime, MaxNanoTime)
|
||||
)
|
||||
|
||||
// SafeCalcTime safely calculates the time given. Will return error if the time is outside the
|
||||
// supported range.
|
||||
func SafeCalcTime(timestamp int64, precision string) (time.Time, error) {
|
||||
mult := GetPrecisionMultiplier(precision)
|
||||
if t, ok := safeSignedMult(timestamp, mult); ok {
|
||||
tme := time.Unix(0, t).UTC()
|
||||
return tme, CheckTime(tme)
|
||||
}
|
||||
|
||||
return time.Time{}, ErrTimeOutOfRange
|
||||
}
|
||||
|
||||
// CheckTime checks that a time is within the safe range.
|
||||
func CheckTime(t time.Time) error {
|
||||
if t.Before(minNanoTime) || t.After(maxNanoTime) {
|
||||
return ErrTimeOutOfRange
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perform the multiplication and check to make sure it didn't overflow.
|
||||
func safeSignedMult(a, b int64) (int64, bool) {
|
||||
if a == 0 || b == 0 || a == 1 || b == 1 {
|
||||
return a * b, true
|
||||
}
|
||||
if a == MinNanoTime || b == MaxNanoTime {
|
||||
return 0, false
|
||||
}
|
||||
c := a * b
|
||||
return c, c/b == a
|
||||
}
|
7  vendor/github.com/influxdata/influxdb1-client/models/uint_support.go  generated  vendored  Normal file
@@ -0,0 +1,7 @@
// +build uint uint64

package models

func init() {
    EnableUintSupport()
}
115  vendor/github.com/influxdata/influxdb1-client/pkg/escape/bytes.go  generated  vendored  Normal file
@@ -0,0 +1,115 @@
|
||||
// Package escape contains utilities for escaping parts of InfluxQL
|
||||
// and InfluxDB line protocol.
|
||||
package escape // import "github.com/influxdata/influxdb1-client/pkg/escape"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Codes is a map of bytes to be escaped.
|
||||
var Codes = map[byte][]byte{
|
||||
',': []byte(`\,`),
|
||||
'"': []byte(`\"`),
|
||||
' ': []byte(`\ `),
|
||||
'=': []byte(`\=`),
|
||||
}
|
||||
|
||||
// Bytes escapes characters on the input slice, as defined by Codes.
|
||||
func Bytes(in []byte) []byte {
|
||||
for b, esc := range Codes {
|
||||
in = bytes.Replace(in, []byte{b}, esc, -1)
|
||||
}
|
||||
return in
|
||||
}
|
||||
|
||||
const escapeChars = `," =`
|
||||
|
||||
// IsEscaped returns whether b has any escaped characters,
|
||||
// i.e. whether b seems to have been processed by Bytes.
|
||||
func IsEscaped(b []byte) bool {
|
||||
for len(b) > 0 {
|
||||
i := bytes.IndexByte(b, '\\')
|
||||
if i < 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
if i+1 < len(b) && strings.IndexByte(escapeChars, b[i+1]) >= 0 {
|
||||
return true
|
||||
}
|
||||
b = b[i+1:]
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// AppendUnescaped appends the unescaped version of src to dst
|
||||
// and returns the resulting slice.
|
||||
func AppendUnescaped(dst, src []byte) []byte {
|
||||
var pos int
|
||||
for len(src) > 0 {
|
||||
next := bytes.IndexByte(src[pos:], '\\')
|
||||
if next < 0 || pos+next+1 >= len(src) {
|
||||
return append(dst, src...)
|
||||
}
|
||||
|
||||
if pos+next+1 < len(src) && strings.IndexByte(escapeChars, src[pos+next+1]) >= 0 {
|
||||
if pos+next > 0 {
|
||||
dst = append(dst, src[:pos+next]...)
|
||||
}
|
||||
src = src[pos+next+1:]
|
||||
pos = 0
|
||||
} else {
|
||||
pos += next + 1
|
||||
}
|
||||
}
|
||||
|
||||
return dst
|
||||
}
|
||||
|
||||
// Unescape returns a new slice containing the unescaped version of in.
|
||||
func Unescape(in []byte) []byte {
|
||||
if len(in) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if bytes.IndexByte(in, '\\') == -1 {
|
||||
return in
|
||||
}
|
||||
|
||||
i := 0
|
||||
inLen := len(in)
|
||||
|
||||
// The output size will be no more than inLen. Preallocating the
|
||||
// capacity of the output is faster and uses less memory than
|
||||
// letting append() do its own (over)allocation.
|
||||
out := make([]byte, 0, inLen)
|
||||
|
||||
for {
|
||||
if i >= inLen {
|
||||
break
|
||||
}
|
||||
if in[i] == '\\' && i+1 < inLen {
|
||||
switch in[i+1] {
|
||||
case ',':
|
||||
out = append(out, ',')
|
||||
i += 2
|
||||
continue
|
||||
case '"':
|
||||
out = append(out, '"')
|
||||
i += 2
|
||||
continue
|
||||
case ' ':
|
||||
out = append(out, ' ')
|
||||
i += 2
|
||||
continue
|
||||
case '=':
|
||||
out = append(out, '=')
|
||||
i += 2
|
||||
continue
|
||||
}
|
||||
}
|
||||
out = append(out, in[i])
|
||||
i += 1
|
||||
}
|
||||
return out
|
||||
}
|
21  vendor/github.com/influxdata/influxdb1-client/pkg/escape/strings.go  generated  vendored  Normal file
@@ -0,0 +1,21 @@
package escape

import "strings"

var (
    escaper   = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)
    unescaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`)
)

// UnescapeString returns unescaped version of in.
func UnescapeString(in string) string {
    if strings.IndexByte(in, '\\') == -1 {
        return in
    }
    return unescaper.Replace(in)
}

// String returns the escaped version of in.
func String(in string) string {
    return escaper.Replace(in)
}
6  vendor/gopkg.in/ini.v1/.gitignore  generated  vendored  Normal file
@@ -0,0 +1,6 @@
testdata/conf_out.ini
ini.sublime-project
ini.sublime-workspace
testdata/conf_reflect.ini
.idea
/.vscode
18  vendor/gopkg.in/ini.v1/.travis.yml  generated  vendored  Normal file
@@ -0,0 +1,18 @@
sudo: false
language: go
go:
- 1.6.x
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
- 1.12.x

script:
- go get golang.org/x/tools/cmd/cover
- go get github.com/smartystreets/goconvey
- mkdir -p $HOME/gopath/src/gopkg.in
- ln -s $HOME/gopath/src/github.com/go-ini/ini $HOME/gopath/src/gopkg.in/ini.v1
- cd $HOME/gopath/src/gopkg.in/ini.v1
- go test -v -cover -race
191  vendor/gopkg.in/ini.v1/LICENSE  generated  vendored  Normal file
@@ -0,0 +1,191 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, and
|
||||
distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
||||
owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all other entities
|
||||
that control, are controlled by, or are under common control with that entity.
|
||||
For the purposes of this definition, "control" means (i) the power, direct or
|
||||
indirect, to cause the direction or management of such entity, whether by
|
||||
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
||||
permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications, including
|
||||
but not limited to software source code, documentation source, and configuration
|
||||
files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical transformation or
|
||||
translation of a Source form, including but not limited to compiled object code,
|
||||
generated documentation, and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
||||
available under the License, as indicated by a copyright notice that is included
|
||||
in or attached to the work (an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
||||
is based on (or derived from) the Work and for which the editorial revisions,
|
||||
annotations, elaborations, or other modifications represent, as a whole, an
|
||||
original work of authorship. For the purposes of this License, Derivative Works
|
||||
shall not include works that remain separable from, or merely link (or bind by
|
||||
name) to the interfaces of, the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including the original version
|
||||
of the Work and any modifications or additions to that Work or Derivative Works
|
||||
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
||||
by the copyright owner or by an individual or Legal Entity authorized to submit
|
||||
on behalf of the copyright owner. For the purposes of this definition,
|
||||
"submitted" means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems, and
|
||||
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
||||
the purpose of discussing and improving the Work, but excluding communication
|
||||
that is conspicuously marked or otherwise designated in writing by the copyright
|
||||
owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
||||
of whom a Contribution has been received by Licensor and subsequently
|
||||
incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the Work and such
|
||||
Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable (except as stated in this section) patent license to make, have
|
||||
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
||||
such license applies only to those patent claims licensable by such Contributor
|
||||
that are necessarily infringed by their Contribution(s) alone or by combination
|
||||
of their Contribution(s) with the Work to which such Contribution(s) was
|
||||
submitted. If You institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
||||
Contribution incorporated within the Work constitutes direct or contributory
|
||||
patent infringement, then any patent licenses granted to You under this License
|
||||
for that Work shall terminate as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution.
|
||||
|
||||
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
||||
in any medium, with or without modifications, and in Source or Object form,
|
||||
provided that You meet the following conditions:
|
||||
|
||||
You must give any other recipients of the Work or Derivative Works a copy of
|
||||
this License; and
|
||||
You must cause any modified files to carry prominent notices stating that You
|
||||
changed the files; and
|
||||
You must retain, in the Source form of any Derivative Works that You distribute,
|
||||
all copyright, patent, trademark, and attribution notices from the Source form
|
||||
of the Work, excluding those notices that do not pertain to any part of the
|
||||
Derivative Works; and
|
||||
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
||||
Derivative Works that You distribute must include a readable copy of the
|
||||
attribution notices contained within such NOTICE file, excluding those notices
|
||||
that do not pertain to any part of the Derivative Works, in at least one of the
|
||||
following places: within a NOTICE text file distributed as part of the
|
||||
Derivative Works; within the Source form or documentation, if provided along
|
||||
with the Derivative Works; or, within a display generated by the Derivative
|
||||
Works, if and wherever such third-party notices normally appear. The contents of
|
||||
the NOTICE file are for informational purposes only and do not modify the
|
||||
License. You may add Your own attribution notices within Derivative Works that
|
||||
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
||||
provided that such additional attribution notices cannot be construed as
|
||||
modifying the License.
|
||||
You may add Your own copyright statement to Your modifications and may provide
|
||||
additional or different license terms and conditions for use, reproduction, or
|
||||
distribution of Your modifications, or for any such Derivative Works as a whole,
|
||||
provided Your use, reproduction, and distribution of the Work otherwise complies
|
||||
with the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions.
|
||||
|
||||
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
||||
for inclusion in the Work by You to the Licensor shall be under the terms and
|
||||
conditions of this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
||||
any separate license agreement you may have executed with Licensor regarding
|
||||
such Contributions.
|
||||
|
||||
6. Trademarks.
|
||||
|
||||
This License does not grant permission to use the trade names, trademarks,
|
||||
service marks, or product names of the Licensor, except as required for
|
||||
reasonable and customary use in describing the origin of the Work and
|
||||
reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty.
|
||||
|
||||
Unless required by applicable law or agreed to in writing, Licensor provides the
|
||||
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||
including, without limitation, any warranties or conditions of TITLE,
|
||||
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
||||
solely responsible for determining the appropriateness of using or
|
||||
redistributing the Work and assume any risks associated with Your exercise of
|
||||
permissions under this License.
|
||||
|
||||
8. Limitation of Liability.
|
||||
|
||||
In no event and under no legal theory, whether in tort (including negligence),
|
||||
contract, or otherwise, unless required by applicable law (such as deliberate
|
||||
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special, incidental,
|
||||
or consequential damages of any character arising as a result of this License or
|
||||
out of the use or inability to use the Work (including but not limited to
|
||||
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
||||
any and all other commercial damages or losses), even if such Contributor has
|
||||
been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability.
|
||||
|
||||
While redistributing the Work or Derivative Works thereof, You may choose to
|
||||
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
||||
other liability obligations and/or rights consistent with this License. However,
|
||||
in accepting such obligations, You may act only on Your own behalf and on Your
|
||||
sole responsibility, not on behalf of any other Contributor, and only if You
|
||||
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason of your
|
||||
accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work
|
||||
|
||||
To apply the Apache License to your work, attach the following boilerplate
|
||||
notice, with the fields enclosed by brackets "[]" replaced with your own
|
||||
identifying information. (Don't include the brackets!) The text should be
|
||||
enclosed in the appropriate comment syntax for the file format. We also
|
||||
recommend that a file or class name and description of purpose be included on
|
||||
the same "printed page" as the copyright notice for easier identification within
|
||||
third-party archives.
|
||||
|
||||
Copyright 2014 Unknwon
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
15
vendor/gopkg.in/ini.v1/Makefile
generated
vendored
Normal file
15
vendor/gopkg.in/ini.v1/Makefile
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
.PHONY: build test bench vet coverage
|
||||
|
||||
build: vet bench
|
||||
|
||||
test:
|
||||
go test -v -cover -race
|
||||
|
||||
bench:
|
||||
go test -v -cover -race -test.bench=. -test.benchmem
|
||||
|
||||
vet:
|
||||
go vet
|
||||
|
||||
coverage:
|
||||
go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out
|
46
vendor/gopkg.in/ini.v1/README.md
generated
vendored
Normal file
46
vendor/gopkg.in/ini.v1/README.md
generated
vendored
Normal file
@ -0,0 +1,46 @@
|
||||
INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg)](https://sourcegraph.com/github.com/go-ini/ini)
|
||||
===
|
||||
|
||||
![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
|
||||
|
||||
Package ini provides INI file read and write functionality in Go.
|
||||
|
||||
## Features
|
||||
|
||||
- Load from multiple data sources(`[]byte`, file and `io.ReadCloser`) with overwrites.
|
||||
- Read with recursion values.
|
||||
- Read with parent-child sections.
|
||||
- Read with auto-increment key names.
|
||||
- Read with multiple-line values.
|
||||
- Read with tons of helper methods.
|
||||
- Read and convert values to Go types.
|
||||
- Read and **WRITE** comments of sections and keys.
|
||||
- Manipulate sections, keys and comments with ease.
|
||||
- Keep sections and keys in order as you parse and save.
|
||||
|
||||
## Installation
|
||||
|
||||
The minimum requirement of Go is **1.6**.
|
||||
|
||||
To use a tagged revision:
|
||||
|
||||
```sh
|
||||
$ go get gopkg.in/ini.v1
|
||||
```
|
||||
|
||||
To use with latest changes:
|
||||
|
||||
```sh
|
||||
$ go get github.com/go-ini/ini
|
||||
```
|
||||
|
||||
Please add `-u` flag to update in the future.
|
||||
|
||||
## Getting Help
|
||||
|
||||
- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started)
|
||||
- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
|
||||
|
||||
## License
|
||||
|
||||
This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
|
34
vendor/gopkg.in/ini.v1/error.go
generated
vendored
Normal file
34
vendor/gopkg.in/ini.v1/error.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
// Copyright 2016 Unknwon
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package ini
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// ErrDelimiterNotFound indicates the error type of no delimiter is found which there should be one.
|
||||
type ErrDelimiterNotFound struct {
|
||||
Line string
|
||||
}
|
||||
|
||||
// IsErrDelimiterNotFound returns true if the given error is an instance of ErrDelimiterNotFound.
|
||||
func IsErrDelimiterNotFound(err error) bool {
|
||||
_, ok := err.(ErrDelimiterNotFound)
|
||||
return ok
|
||||
}
|
||||
|
||||
func (err ErrDelimiterNotFound) Error() string {
|
||||
return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
|
||||
}
|
418
vendor/gopkg.in/ini.v1/file.go
generated
vendored
Normal file
418
vendor/gopkg.in/ini.v1/file.go
generated
vendored
Normal file
@ -0,0 +1,418 @@
|
||||
// Copyright 2017 Unknwon
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package ini
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// File represents a combination of a or more INI file(s) in memory.
|
||||
type File struct {
|
||||
options LoadOptions
|
||||
dataSources []dataSource
|
||||
|
||||
// Should make things safe, but sometimes doesn't matter.
|
||||
BlockMode bool
|
||||
lock sync.RWMutex
|
||||
|
||||
// To keep data in order.
|
||||
sectionList []string
|
||||
// Actual data is stored here.
|
||||
sections map[string]*Section
|
||||
|
||||
NameMapper
|
||||
ValueMapper
|
||||
}
|
||||
|
||||
// newFile initializes File object with given data sources.
|
||||
func newFile(dataSources []dataSource, opts LoadOptions) *File {
|
||||
if len(opts.KeyValueDelimiters) == 0 {
|
||||
opts.KeyValueDelimiters = "=:"
|
||||
}
|
||||
return &File{
|
||||
BlockMode: true,
|
||||
dataSources: dataSources,
|
||||
sections: make(map[string]*Section),
|
||||
sectionList: make([]string, 0, 10),
|
||||
options: opts,
|
||||
}
|
||||
}
|
||||
|
||||
// Empty returns an empty file object.
|
||||
func Empty() *File {
|
||||
// Ignore error here, we sure our data is good.
|
||||
f, _ := Load([]byte(""))
|
||||
return f
|
||||
}
|
||||
|
||||
// NewSection creates a new section.
|
||||
func (f *File) NewSection(name string) (*Section, error) {
|
||||
if len(name) == 0 {
|
||||
return nil, errors.New("error creating new section: empty section name")
|
||||
} else if f.options.Insensitive && name != DefaultSection {
|
||||
name = strings.ToLower(name)
|
||||
}
|
||||
|
||||
if f.BlockMode {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
}
|
||||
|
||||
if inSlice(name, f.sectionList) {
|
||||
return f.sections[name], nil
|
||||
}
|
||||
|
||||
f.sectionList = append(f.sectionList, name)
|
||||
f.sections[name] = newSection(f, name)
|
||||
return f.sections[name], nil
|
||||
}
|
||||
|
||||
// NewRawSection creates a new section with an unparseable body.
|
||||
func (f *File) NewRawSection(name, body string) (*Section, error) {
|
||||
section, err := f.NewSection(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
section.isRawSection = true
|
||||
section.rawBody = body
|
||||
return section, nil
|
||||
}
|
||||
|
||||
// NewSections creates a list of sections.
|
||||
func (f *File) NewSections(names ...string) (err error) {
|
||||
for _, name := range names {
|
||||
if _, err = f.NewSection(name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetSection returns section by given name.
|
||||
func (f *File) GetSection(name string) (*Section, error) {
|
||||
if len(name) == 0 {
|
||||
name = DefaultSection
|
||||
}
|
||||
if f.options.Insensitive {
|
||||
name = strings.ToLower(name)
|
||||
}
|
||||
|
||||
if f.BlockMode {
|
||||
f.lock.RLock()
|
||||
defer f.lock.RUnlock()
|
||||
}
|
||||
|
||||
sec := f.sections[name]
|
||||
if sec == nil {
|
||||
return nil, fmt.Errorf("section '%s' does not exist", name)
|
||||
}
|
||||
return sec, nil
|
||||
}
|
||||
|
||||
// Section assumes named section exists and returns a zero-value when not.
|
||||
func (f *File) Section(name string) *Section {
|
||||
sec, err := f.GetSection(name)
|
||||
if err != nil {
|
||||
// Note: It's OK here because the only possible error is empty section name,
|
||||
// but if it's empty, this piece of code won't be executed.
|
||||
sec, _ = f.NewSection(name)
|
||||
return sec
|
||||
}
|
||||
return sec
|
||||
}
|
||||
|
||||
// Sections returns a list of Section stored in the current instance.
|
||||
func (f *File) Sections() []*Section {
|
||||
if f.BlockMode {
|
||||
f.lock.RLock()
|
||||
defer f.lock.RUnlock()
|
||||
}
|
||||
|
||||
sections := make([]*Section, len(f.sectionList))
|
||||
for i, name := range f.sectionList {
|
||||
sections[i] = f.sections[name]
|
||||
}
|
||||
return sections
|
||||
}
|
||||
|
||||
// ChildSections returns a list of child sections of given section name.
|
||||
func (f *File) ChildSections(name string) []*Section {
|
||||
return f.Section(name).ChildSections()
|
||||
}
|
||||
|
||||
// SectionStrings returns list of section names.
|
||||
func (f *File) SectionStrings() []string {
|
||||
list := make([]string, len(f.sectionList))
|
||||
copy(list, f.sectionList)
|
||||
return list
|
||||
}
|
||||
|
||||
// DeleteSection deletes a section.
|
||||
func (f *File) DeleteSection(name string) {
|
||||
if f.BlockMode {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
}
|
||||
|
||||
if len(name) == 0 {
|
||||
name = DefaultSection
|
||||
}
|
||||
|
||||
for i, s := range f.sectionList {
|
||||
if s == name {
|
||||
f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
|
||||
delete(f.sections, name)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *File) reload(s dataSource) error {
|
||||
r, err := s.ReadCloser()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
return f.parse(r)
|
||||
}
|
||||
|
||||
// Reload reloads and parses all data sources.
|
||||
func (f *File) Reload() (err error) {
|
||||
for _, s := range f.dataSources {
|
||||
if err = f.reload(s); err != nil {
|
||||
// In loose mode, we create an empty default section for nonexistent files.
|
||||
if os.IsNotExist(err) && f.options.Loose {
|
||||
f.parse(bytes.NewBuffer(nil))
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Append appends one or more data sources and reloads automatically.
|
||||
func (f *File) Append(source interface{}, others ...interface{}) error {
|
||||
ds, err := parseDataSource(source)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.dataSources = append(f.dataSources, ds)
|
||||
for _, s := range others {
|
||||
ds, err = parseDataSource(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.dataSources = append(f.dataSources, ds)
|
||||
}
|
||||
return f.Reload()
|
||||
}
|
||||
|
||||
func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
|
||||
equalSign := DefaultFormatLeft + "=" + DefaultFormatRight
|
||||
|
||||
if PrettyFormat || PrettyEqual {
|
||||
equalSign = " = "
|
||||
}
|
||||
|
||||
// Use buffer to make sure target is safe until finish encoding.
|
||||
buf := bytes.NewBuffer(nil)
|
||||
for i, sname := range f.sectionList {
|
||||
sec := f.Section(sname)
|
||||
if len(sec.Comment) > 0 {
|
||||
// Support multiline comments
|
||||
lines := strings.Split(sec.Comment, LineBreak)
|
||||
for i := range lines {
|
||||
if lines[i][0] != '#' && lines[i][0] != ';' {
|
||||
lines[i] = "; " + lines[i]
|
||||
} else {
|
||||
lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
|
||||
}
|
||||
|
||||
if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if i > 0 || DefaultHeader {
|
||||
if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
// Write nothing if default section is empty
|
||||
if len(sec.keyList) == 0 {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if sec.isRawSection {
|
||||
if _, err := buf.WriteString(sec.rawBody); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if PrettySection {
|
||||
// Put a line between sections
|
||||
if _, err := buf.WriteString(LineBreak); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Count and generate alignment length and buffer spaces using the
|
||||
// longest key. Keys may be modifed if they contain certain characters so
|
||||
// we need to take that into account in our calculation.
|
||||
alignLength := 0
|
||||
if PrettyFormat {
|
||||
for _, kname := range sec.keyList {
|
||||
keyLength := len(kname)
|
||||
// First case will surround key by ` and second by """
|
||||
if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) {
|
||||
keyLength += 2
|
||||
} else if strings.Contains(kname, "`") {
|
||||
keyLength += 6
|
||||
}
|
||||
|
||||
if keyLength > alignLength {
|
||||
alignLength = keyLength
|
||||
}
|
||||
}
|
||||
}
|
||||
alignSpaces := bytes.Repeat([]byte(" "), alignLength)
|
||||
|
||||
KEY_LIST:
|
||||
for _, kname := range sec.keyList {
|
||||
key := sec.Key(kname)
|
||||
if len(key.Comment) > 0 {
|
||||
if len(indent) > 0 && sname != DefaultSection {
|
||||
buf.WriteString(indent)
|
||||
}
|
||||
|
||||
// Support multiline comments
|
||||
lines := strings.Split(key.Comment, LineBreak)
|
||||
for i := range lines {
|
||||
if lines[i][0] != '#' && lines[i][0] != ';' {
|
||||
lines[i] = "; " + strings.TrimSpace(lines[i])
|
||||
} else {
|
||||
lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
|
||||
}
|
||||
|
||||
if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(indent) > 0 && sname != DefaultSection {
|
||||
buf.WriteString(indent)
|
||||
}
|
||||
|
||||
switch {
|
||||
case key.isAutoIncrement:
|
||||
kname = "-"
|
||||
case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters):
|
||||
kname = "`" + kname + "`"
|
||||
case strings.Contains(kname, "`"):
|
||||
kname = `"""` + kname + `"""`
|
||||
}
|
||||
|
||||
for _, val := range key.ValueWithShadows() {
|
||||
if _, err := buf.WriteString(kname); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if key.isBooleanType {
|
||||
if kname != sec.keyList[len(sec.keyList)-1] {
|
||||
buf.WriteString(LineBreak)
|
||||
}
|
||||
continue KEY_LIST
|
||||
}
|
||||
|
||||
// Write out alignment spaces before "=" sign
|
||||
if PrettyFormat {
|
||||
buf.Write(alignSpaces[:alignLength-len(kname)])
|
||||
}
|
||||
|
||||
// In case key value contains "\n", "`", "\"", "#" or ";"
|
||||
if strings.ContainsAny(val, "\n`") {
|
||||
val = `"""` + val + `"""`
|
||||
} else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
|
||||
val = "`" + val + "`"
|
||||
}
|
||||
if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
for _, val := range key.nestedValues {
|
||||
if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if PrettySection {
|
||||
// Put a line between sections
|
||||
if _, err := buf.WriteString(LineBreak); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// WriteToIndent writes content into io.Writer with given indention.
|
||||
// If PrettyFormat has been set to be true,
|
||||
// it will align "=" sign with spaces under each section.
|
||||
func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) {
|
||||
buf, err := f.writeToBuffer(indent)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return buf.WriteTo(w)
|
||||
}
|
||||
|
||||
// WriteTo writes file content into io.Writer.
|
||||
func (f *File) WriteTo(w io.Writer) (int64, error) {
|
||||
return f.WriteToIndent(w, "")
|
||||
}
|
||||
|
||||
// SaveToIndent writes content to file system with given value indention.
|
||||
func (f *File) SaveToIndent(filename, indent string) error {
|
||||
// Note: Because we are truncating with os.Create,
|
||||
// so it's safer to save to a temporary file location and rename afte done.
|
||||
buf, err := f.writeToBuffer(indent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ioutil.WriteFile(filename, buf.Bytes(), 0666)
|
||||
}
|
||||
|
||||
// SaveTo writes content to file system.
|
||||
func (f *File) SaveTo(filename string) error {
|
||||
return f.SaveToIndent(filename, "")
|
||||
}
|
223
vendor/gopkg.in/ini.v1/ini.go
generated
vendored
Normal file
223
vendor/gopkg.in/ini.v1/ini.go
generated
vendored
Normal file
@ -0,0 +1,223 @@
|
||||
// +build go1.6
|
||||
|
||||
// Copyright 2014 Unknwon
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
// Package ini provides INI file read and write functionality in Go.
|
||||
package ini
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"regexp"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultSection is the name of default section. You can use this constant or the string literal.
|
||||
// In most of cases, an empty string is all you need to access the section.
|
||||
DefaultSection = "DEFAULT"
|
||||
// Deprecated: Use "DefaultSection" instead.
|
||||
DEFAULT_SECTION = DefaultSection
|
||||
|
||||
// Maximum allowed depth when recursively substituing variable names.
|
||||
depthValues = 99
|
||||
version = "1.44.0"
|
||||
)
|
||||
|
||||
// Version returns current package version literal.
|
||||
func Version() string {
|
||||
return version
|
||||
}
|
||||
|
||||
var (
|
||||
// LineBreak is the delimiter to determine or compose a new line.
|
||||
// This variable will be changed to "\r\n" automatically on Windows at package init time.
|
||||
LineBreak = "\n"
|
||||
|
||||
// DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled.
|
||||
DefaultFormatLeft = ""
|
||||
// DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled.
|
||||
DefaultFormatRight = ""
|
||||
|
||||
// Variable regexp pattern: %(variable)s
|
||||
varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
|
||||
|
||||
// PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output
|
||||
// or reduce all possible spaces for compact format.
|
||||
PrettyFormat = true
|
||||
|
||||
// PrettyEqual places spaces around "=" sign even when PrettyFormat is false.
|
||||
PrettyEqual = false
|
||||
|
||||
// DefaultHeader explicitly writes default section header.
|
||||
DefaultHeader = false
|
||||
|
||||
// PrettySection indicates whether to put a line between sections.
|
||||
PrettySection = true
|
||||
)
|
||||
|
||||
func init() {
|
||||
if runtime.GOOS == "windows" {
|
||||
LineBreak = "\r\n"
|
||||
}
|
||||
}
|
||||
|
||||
func inSlice(str string, s []string) bool {
|
||||
for _, v := range s {
|
||||
if str == v {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// dataSource is an interface that returns object which can be read and closed.
|
||||
type dataSource interface {
|
||||
ReadCloser() (io.ReadCloser, error)
|
||||
}
|
||||
|
||||
// sourceFile represents an object that contains content on the local file system.
|
||||
type sourceFile struct {
|
||||
name string
|
||||
}
|
||||
|
||||
func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
|
||||
return os.Open(s.name)
|
||||
}
|
||||
|
||||
// sourceData represents an object that contains content in memory.
|
||||
type sourceData struct {
|
||||
data []byte
|
||||
}
|
||||
|
||||
func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
|
||||
return ioutil.NopCloser(bytes.NewReader(s.data)), nil
|
||||
}
|
||||
|
||||
// sourceReadCloser represents an input stream with Close method.
|
||||
type sourceReadCloser struct {
|
||||
reader io.ReadCloser
|
||||
}
|
||||
|
||||
func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
|
||||
return s.reader, nil
|
||||
}
|
||||
|
||||
func parseDataSource(source interface{}) (dataSource, error) {
|
||||
switch s := source.(type) {
|
||||
case string:
|
||||
return sourceFile{s}, nil
|
||||
case []byte:
|
||||
return &sourceData{s}, nil
|
||||
case io.ReadCloser:
|
||||
return &sourceReadCloser{s}, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
|
||||
}
|
||||
}
|
||||
|
||||
// LoadOptions contains all customized options used for load data source(s).
|
||||
type LoadOptions struct {
|
||||
// Loose indicates whether the parser should ignore nonexistent files or return error.
|
||||
Loose bool
|
||||
// Insensitive indicates whether the parser forces all section and key names to lowercase.
|
||||
Insensitive bool
|
||||
// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
|
||||
IgnoreContinuation bool
|
||||
// IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value.
|
||||
IgnoreInlineComment bool
|
||||
// SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
|
||||
SkipUnrecognizableLines bool
|
||||
// AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing.
|
||||
// This type of keys are mostly used in my.cnf.
|
||||
AllowBooleanKeys bool
|
||||
// AllowShadows indicates whether to keep track of keys with same name under same section.
|
||||
AllowShadows bool
|
||||
// AllowNestedValues indicates whether to allow AWS-like nested values.
|
||||
// Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values
|
||||
AllowNestedValues bool
|
||||
// AllowPythonMultilineValues indicates whether to allow Python-like multi-line values.
|
||||
// Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure
|
||||
// Relevant quote: Values can also span multiple lines, as long as they are indented deeper
|
||||
// than the first line of the value.
|
||||
AllowPythonMultilineValues bool
|
||||
// SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value.
|
||||
// Docs: https://docs.python.org/2/library/configparser.html
|
||||
// Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names.
|
||||
// In the latter case, they need to be preceded by a whitespace character to be recognized as a comment.
|
||||
SpaceBeforeInlineComment bool
|
||||
// UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format
|
||||
// when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value"
|
||||
UnescapeValueDoubleQuotes bool
|
||||
// UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format
|
||||
// when value is NOT surrounded by any quotes.
|
||||
// Note: UNSTABLE, behavior might change to only unescape inside double quotes but may noy necessary at all.
|
||||
UnescapeValueCommentSymbols bool
|
||||
// UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise
|
||||
// conform to key/value pairs. Specify the names of those blocks here.
|
||||
UnparseableSections []string
|
||||
// KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:".
|
||||
KeyValueDelimiters string
|
||||
// PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes).
|
||||
PreserveSurroundedQuote bool
|
||||
}
|
||||
|
||||
// LoadSources allows caller to apply customized options for loading from data source(s).
|
||||
func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
|
||||
sources := make([]dataSource, len(others)+1)
|
||||
sources[0], err = parseDataSource(source)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i := range others {
|
||||
sources[i+1], err = parseDataSource(others[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
f := newFile(sources, opts)
|
||||
if err = f.Reload(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Load loads and parses from INI data sources.
|
||||
// Arguments can be mixed of file name with string type, or raw data in []byte.
|
||||
// It will return error if list contains nonexistent files.
|
||||
func Load(source interface{}, others ...interface{}) (*File, error) {
|
||||
return LoadSources(LoadOptions{}, source, others...)
|
||||
}
|
||||
|
||||
// LooseLoad has exactly same functionality as Load function
|
||||
// except it ignores nonexistent files instead of returning error.
|
||||
func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
|
||||
return LoadSources(LoadOptions{Loose: true}, source, others...)
|
||||
}
|
||||
|
||||
// InsensitiveLoad has exactly same functionality as Load function
|
||||
// except it forces all section and key names to be lowercased.
|
||||
func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
|
||||
return LoadSources(LoadOptions{Insensitive: true}, source, others...)
|
||||
}
|
||||
|
||||
// ShadowLoad has exactly same functionality as Load function
|
||||
// except it allows have shadow keys.
|
||||
func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
|
||||
return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
|
||||
}
|
753
vendor/gopkg.in/ini.v1/key.go
generated
vendored
Normal file
753
vendor/gopkg.in/ini.v1/key.go
generated
vendored
Normal file
@ -0,0 +1,753 @@
|
||||
// Copyright 2014 Unknwon
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package ini
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Key represents a key under a section.
|
||||
type Key struct {
|
||||
s *Section
|
||||
Comment string
|
||||
name string
|
||||
value string
|
||||
isAutoIncrement bool
|
||||
isBooleanType bool
|
||||
|
||||
isShadow bool
|
||||
shadows []*Key
|
||||
|
||||
nestedValues []string
|
||||
}
|
||||
|
||||
// newKey simply return a key object with given values.
|
||||
func newKey(s *Section, name, val string) *Key {
|
||||
return &Key{
|
||||
s: s,
|
||||
name: name,
|
||||
value: val,
|
||||
}
|
||||
}
|
||||
|
||||
func (k *Key) addShadow(val string) error {
|
||||
if k.isShadow {
|
||||
return errors.New("cannot add shadow to another shadow key")
|
||||
} else if k.isAutoIncrement || k.isBooleanType {
|
||||
return errors.New("cannot add shadow to auto-increment or boolean key")
|
||||
}
|
||||
|
||||
shadow := newKey(k.s, k.name, val)
|
||||
shadow.isShadow = true
|
||||
k.shadows = append(k.shadows, shadow)
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddShadow adds a new shadow key to itself.
|
||||
func (k *Key) AddShadow(val string) error {
|
||||
if !k.s.f.options.AllowShadows {
|
||||
return errors.New("shadow key is not allowed")
|
||||
}
|
||||
return k.addShadow(val)
|
||||
}
|
||||
|
||||
func (k *Key) addNestedValue(val string) error {
|
||||
if k.isAutoIncrement || k.isBooleanType {
|
||||
return errors.New("cannot add nested value to auto-increment or boolean key")
|
||||
}
|
||||
|
||||
k.nestedValues = append(k.nestedValues, val)
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddNestedValue adds a nested value to the key.
|
||||
func (k *Key) AddNestedValue(val string) error {
|
||||
if !k.s.f.options.AllowNestedValues {
|
||||
return errors.New("nested value is not allowed")
|
||||
}
|
||||
return k.addNestedValue(val)
|
||||
}
|
||||
|
||||
// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
|
||||
type ValueMapper func(string) string
|
||||
|
||||
// Name returns name of key.
|
||||
func (k *Key) Name() string {
|
||||
return k.name
|
||||
}
|
||||
|
||||
// Value returns raw value of key for performance purpose.
|
||||
func (k *Key) Value() string {
|
||||
return k.value
|
||||
}
|
||||
|
||||
// ValueWithShadows returns raw values of key and its shadows if any.
|
||||
func (k *Key) ValueWithShadows() []string {
|
||||
if len(k.shadows) == 0 {
|
||||
return []string{k.value}
|
||||
}
|
||||
vals := make([]string, len(k.shadows)+1)
|
||||
vals[0] = k.value
|
||||
for i := range k.shadows {
|
||||
vals[i+1] = k.shadows[i].value
|
||||
}
|
||||
return vals
|
||||
}
|
||||
|
||||
// NestedValues returns nested values stored in the key.
|
||||
// It is possible returned value is nil if no nested values stored in the key.
|
||||
func (k *Key) NestedValues() []string {
|
||||
return k.nestedValues
|
||||
}
|
||||
|
||||
// transformValue takes a raw value and transforms to its final string.
|
||||
func (k *Key) transformValue(val string) string {
|
||||
if k.s.f.ValueMapper != nil {
|
||||
val = k.s.f.ValueMapper(val)
|
||||
}
|
||||
|
||||
// Fail-fast if no indicate char found for recursive value
|
||||
if !strings.Contains(val, "%") {
|
||||
return val
|
||||
}
|
||||
for i := 0; i < depthValues; i++ {
|
||||
vr := varPattern.FindString(val)
|
||||
if len(vr) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Take off leading '%(' and trailing ')s'.
|
||||
noption := vr[2 : len(vr)-2]
|
||||
|
||||
// Search in the same section.
|
||||
nk, err := k.s.GetKey(noption)
|
||||
if err != nil || k == nk {
|
||||
// Search again in default section.
|
||||
nk, _ = k.s.f.Section("").GetKey(noption)
|
||||
}
|
||||
|
||||
// Substitute by new value and take off leading '%(' and trailing ')s'.
|
||||
val = strings.Replace(val, vr, nk.value, -1)
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// String returns string representation of value.
|
||||
func (k *Key) String() string {
|
||||
return k.transformValue(k.value)
|
||||
}
|
||||
|
||||
// Validate accepts a validate function which can
|
||||
// return modifed result as key value.
|
||||
func (k *Key) Validate(fn func(string) string) string {
|
||||
return fn(k.String())
|
||||
}
|
||||
|
||||
// parseBool returns the boolean value represented by the string.
|
||||
//
|
||||
// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
|
||||
// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
|
||||
// Any other value returns an error.
|
||||
func parseBool(str string) (value bool, err error) {
|
||||
switch str {
|
||||
case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
|
||||
return true, nil
|
||||
case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
|
||||
return false, nil
|
||||
}
|
||||
return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
|
||||
}
|
||||
|
||||
// Bool returns bool type value.
|
||||
func (k *Key) Bool() (bool, error) {
|
||||
return parseBool(k.String())
|
||||
}
|
||||
|
||||
// Float64 returns float64 type value.
|
||||
func (k *Key) Float64() (float64, error) {
|
||||
return strconv.ParseFloat(k.String(), 64)
|
||||
}
|
||||
|
||||
// Int returns int type value.
|
||||
func (k *Key) Int() (int, error) {
|
||||
v, err := strconv.ParseInt(k.String(), 0, 64)
|
||||
return int(v), err
|
||||
}
|
||||
|
||||
// Int64 returns int64 type value.
|
||||
func (k *Key) Int64() (int64, error) {
|
||||
return strconv.ParseInt(k.String(), 0, 64)
|
||||
}
|
||||
|
||||
// Uint returns uint type valued.
|
||||
func (k *Key) Uint() (uint, error) {
|
||||
u, e := strconv.ParseUint(k.String(), 0, 64)
|
||||
return uint(u), e
|
||||
}
|
||||
|
||||
// Uint64 returns uint64 type value.
|
||||
func (k *Key) Uint64() (uint64, error) {
|
||||
return strconv.ParseUint(k.String(), 0, 64)
|
||||
}
|
||||
|
||||
// Duration returns time.Duration type value.
|
||||
func (k *Key) Duration() (time.Duration, error) {
|
||||
return time.ParseDuration(k.String())
|
||||
}
|
||||
|
||||
// TimeFormat parses with given format and returns time.Time type value.
|
||||
func (k *Key) TimeFormat(format string) (time.Time, error) {
|
||||
return time.Parse(format, k.String())
|
||||
}
|
||||
|
||||
// Time parses with RFC3339 format and returns time.Time type value.
|
||||
func (k *Key) Time() (time.Time, error) {
|
||||
return k.TimeFormat(time.RFC3339)
|
||||
}
|
||||
|
||||
// MustString returns default value if key value is empty.
|
||||
func (k *Key) MustString(defaultVal string) string {
|
||||
val := k.String()
|
||||
if len(val) == 0 {
|
||||
k.value = defaultVal
|
||||
return defaultVal
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// MustBool always returns value without error,
|
||||
// it returns false if error occurs.
|
||||
func (k *Key) MustBool(defaultVal ...bool) bool {
|
||||
val, err := k.Bool()
|
||||
if len(defaultVal) > 0 && err != nil {
|
||||
k.value = strconv.FormatBool(defaultVal[0])
|
||||
return defaultVal[0]
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// MustFloat64 always returns value without error,
|
||||
// it returns 0.0 if error occurs.
|
||||
func (k *Key) MustFloat64(defaultVal ...float64) float64 {
|
||||
val, err := k.Float64()
|
||||
if len(defaultVal) > 0 && err != nil {
|
||||
k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
|
||||
return defaultVal[0]
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// MustInt always returns value without error,
|
||||
// it returns 0 if error occurs.
|
||||
func (k *Key) MustInt(defaultVal ...int) int {
|
||||
val, err := k.Int()
|
||||
if len(defaultVal) > 0 && err != nil {
|
||||
k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
|
||||
return defaultVal[0]
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// MustInt64 always returns value without error,
|
||||
// it returns 0 if error occurs.
|
||||
func (k *Key) MustInt64(defaultVal ...int64) int64 {
|
||||
val, err := k.Int64()
|
||||
if len(defaultVal) > 0 && err != nil {
|
||||
k.value = strconv.FormatInt(defaultVal[0], 10)
|
||||
return defaultVal[0]
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// MustUint always returns value without error,
|
||||
// it returns 0 if error occurs.
|
||||
func (k *Key) MustUint(defaultVal ...uint) uint {
|
||||
val, err := k.Uint()
|
||||
if len(defaultVal) > 0 && err != nil {
|
||||
k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
|
||||
return defaultVal[0]
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// MustUint64 always returns value without error,
|
||||
// it returns 0 if error occurs.
|
||||
func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
|
||||
val, err := k.Uint64()
|
||||
if len(defaultVal) > 0 && err != nil {
|
||||
k.value = strconv.FormatUint(defaultVal[0], 10)
|
||||
return defaultVal[0]
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// MustDuration always returns value without error,
|
||||
// it returns zero value if error occurs.
|
||||
func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
|
||||
val, err := k.Duration()
|
||||
if len(defaultVal) > 0 && err != nil {
|
||||
k.value = defaultVal[0].String()
|
||||
return defaultVal[0]
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// MustTimeFormat always parses with given format and returns value without error,
|
||||
// it returns zero value if error occurs.
|
||||
func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
|
||||
val, err := k.TimeFormat(format)
|
||||
if len(defaultVal) > 0 && err != nil {
|
||||
k.value = defaultVal[0].Format(format)
|
||||
return defaultVal[0]
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// MustTime always parses with RFC3339 format and returns value without error,
|
||||
// it returns zero value if error occurs.
|
||||
func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
|
||||
return k.MustTimeFormat(time.RFC3339, defaultVal...)
|
||||
}
|
||||
|
||||
// In always returns value without error,
|
||||
// it returns default value if error occurs or doesn't fit into candidates.
|
||||
func (k *Key) In(defaultVal string, candidates []string) string {
|
||||
val := k.String()
|
||||
for _, cand := range candidates {
|
||||
if val == cand {
|
||||
return val
|
||||
}
|
||||
}
|
||||
return defaultVal
|
||||
}
|
||||
|
||||
// InFloat64 always returns value without error,
|
||||
// it returns default value if error occurs or doesn't fit into candidates.
|
||||
func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
|
||||
val := k.MustFloat64()
|
||||
for _, cand := range candidates {
|
||||
if val == cand {
|
||||
return val
|
||||
}
|
||||
}
|
||||
return defaultVal
|
||||
}
|
||||
|
||||
// InInt always returns value without error,
|
||||
// it returns default value if error occurs or doesn't fit into candidates.
|
||||
func (k *Key) InInt(defaultVal int, candidates []int) int {
|
||||
val := k.MustInt()
|
||||
for _, cand := range candidates {
|
||||
if val == cand {
|
||||
return val
|
||||
}
|
||||
}
|
||||
return defaultVal
|
||||
}
|
||||
|
||||
// InInt64 always returns value without error,
|
||||
// it returns default value if error occurs or doesn't fit into candidates.
|
||||
func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
|
||||
val := k.MustInt64()
|
||||
for _, cand := range candidates {
|
||||
if val == cand {
|
||||
return val
|
||||
}
|
||||
}
|
||||
return defaultVal
|
||||
}
|
||||
|
||||
// InUint always returns value without error,
|
||||
// it returns default value if error occurs or doesn't fit into candidates.
|
||||
func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
|
||||
val := k.MustUint()
|
||||
for _, cand := range candidates {
|
||||
if val == cand {
|
||||
return val
|
||||
}
|
||||
}
|
||||
return defaultVal
|
||||
}
|
||||
|
||||
// InUint64 always returns value without error,
|
||||
// it returns default value if error occurs or doesn't fit into candidates.
|
||||
func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
|
||||
val := k.MustUint64()
|
||||
for _, cand := range candidates {
|
||||
if val == cand {
|
||||
return val
|
||||
}
|
||||
}
|
||||
return defaultVal
|
||||
}
|
||||
|
||||
// InTimeFormat always parses with given format and returns value without error,
|
||||
// it returns default value if error occurs or doesn't fit into candidates.
|
||||
func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
|
||||
val := k.MustTimeFormat(format)
|
||||
for _, cand := range candidates {
|
||||
if val == cand {
|
||||
return val
|
||||
}
|
||||
}
|
||||
return defaultVal
|
||||
}
|
||||
|
||||
// InTime always parses with RFC3339 format and returns value without error,
|
||||
// it returns default value if error occurs or doesn't fit into candidates.
|
||||
func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
|
||||
return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
|
||||
}
|
||||
|
||||
// RangeFloat64 checks if value is in given range inclusively,
|
||||
// and returns default value if it's not.
|
||||
func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
|
||||
val := k.MustFloat64()
|
||||
if val < min || val > max {
|
||||
return defaultVal
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// RangeInt checks if value is in given range inclusively,
|
||||
// and returns default value if it's not.
|
||||
func (k *Key) RangeInt(defaultVal, min, max int) int {
|
||||
val := k.MustInt()
|
||||
if val < min || val > max {
|
||||
return defaultVal
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// RangeInt64 checks if value is in given range inclusively,
|
||||
// and returns default value if it's not.
|
||||
func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
|
||||
val := k.MustInt64()
|
||||
if val < min || val > max {
|
||||
return defaultVal
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// RangeTimeFormat checks if value with given format is in given range inclusively,
|
||||
// and returns default value if it's not.
|
||||
func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
|
||||
val := k.MustTimeFormat(format)
|
||||
if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
|
||||
return defaultVal
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// RangeTime checks if value with RFC3339 format is in given range inclusively,
|
||||
// and returns default value if it's not.
|
||||
func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
|
||||
return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
|
||||
}
|
||||
|
||||
// Strings returns list of string divided by given delimiter.
|
||||
func (k *Key) Strings(delim string) []string {
|
||||
str := k.String()
|
||||
if len(str) == 0 {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
runes := []rune(str)
|
||||
vals := make([]string, 0, 2)
|
||||
var buf bytes.Buffer
|
||||
escape := false
|
||||
idx := 0
|
||||
for {
|
||||
if escape {
|
||||
escape = false
|
||||
if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) {
|
||||
buf.WriteRune('\\')
|
||||
}
|
||||
buf.WriteRune(runes[idx])
|
||||
} else {
|
||||
if runes[idx] == '\\' {
|
||||
escape = true
|
||||
} else if strings.HasPrefix(string(runes[idx:]), delim) {
|
||||
idx += len(delim) - 1
|
||||
vals = append(vals, strings.TrimSpace(buf.String()))
|
||||
buf.Reset()
|
||||
} else {
|
||||
buf.WriteRune(runes[idx])
|
||||
}
|
||||
}
|
||||
idx++
|
||||
if idx == len(runes) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if buf.Len() > 0 {
|
||||
vals = append(vals, strings.TrimSpace(buf.String()))
|
||||
}
|
||||
|
||||
return vals
|
||||
}
|
||||
|
||||
// StringsWithShadows returns list of string divided by given delimiter.
|
||||
// Shadows will also be appended if any.
|
||||
func (k *Key) StringsWithShadows(delim string) []string {
|
||||
vals := k.ValueWithShadows()
|
||||
results := make([]string, 0, len(vals)*2)
|
||||
for i := range vals {
|
||||
if len(vals) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
results = append(results, strings.Split(vals[i], delim)...)
|
||||
}
|
||||
|
||||
for i := range results {
|
||||
results[i] = k.transformValue(strings.TrimSpace(results[i]))
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
|
||||
func (k *Key) Float64s(delim string) []float64 {
|
||||
vals, _ := k.parseFloat64s(k.Strings(delim), true, false)
|
||||
return vals
|
||||
}
|
||||
|
||||
// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
|
||||
func (k *Key) Ints(delim string) []int {
|
||||
vals, _ := k.parseInts(k.Strings(delim), true, false)
|
||||
return vals
|
||||
}
|
||||
|
||||
// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
|
||||
func (k *Key) Int64s(delim string) []int64 {
|
||||
vals, _ := k.parseInt64s(k.Strings(delim), true, false)
|
||||
return vals
|
||||
}
|
||||
|
||||
// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
|
||||
func (k *Key) Uints(delim string) []uint {
|
||||
vals, _ := k.parseUints(k.Strings(delim), true, false)
|
||||
return vals
|
||||
}
|
||||
|
||||
// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
|
||||
func (k *Key) Uint64s(delim string) []uint64 {
|
||||
vals, _ := k.parseUint64s(k.Strings(delim), true, false)
|
||||
return vals
|
||||
}
|
||||
|
||||
// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
|
||||
// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
|
||||
func (k *Key) TimesFormat(format, delim string) []time.Time {
|
||||
vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false)
|
||||
return vals
|
||||
}
|
||||
|
||||
// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
|
||||
// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
|
||||
func (k *Key) Times(delim string) []time.Time {
|
||||
return k.TimesFormat(time.RFC3339, delim)
|
||||
}
|
||||
|
||||
// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then
|
||||
// it will not be included to result list.
|
||||
func (k *Key) ValidFloat64s(delim string) []float64 {
|
||||
vals, _ := k.parseFloat64s(k.Strings(delim), false, false)
|
||||
return vals
|
||||
}
|
||||
|
||||
// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will
|
||||
// not be included to result list.
|
||||
func (k *Key) ValidInts(delim string) []int {
|
||||
vals, _ := k.parseInts(k.Strings(delim), false, false)
|
||||
return vals
|
||||
}
|
||||
|
||||
// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer,
|
||||
// then it will not be included to result list.
|
||||
func (k *Key) ValidInt64s(delim string) []int64 {
|
||||
vals, _ := k.parseInt64s(k.Strings(delim), false, false)
|
||||
return vals
|
||||
}
|
||||
|
||||
// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer,
|
||||
// then it will not be included to result list.
|
||||
func (k *Key) ValidUints(delim string) []uint {
|
||||
vals, _ := k.parseUints(k.Strings(delim), false, false)
|
||||
return vals
|
||||
}
|
||||
|
||||
// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned
|
||||
// integer, then it will not be included to result list.
|
||||
func (k *Key) ValidUint64s(delim string) []uint64 {
|
||||
vals, _ := k.parseUint64s(k.Strings(delim), false, false)
|
||||
return vals
|
||||
}
|
||||
|
||||
// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
|
||||
func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
|
||||
vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false)
|
||||
return vals
|
||||
}
|
||||
|
||||
// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
|
||||
func (k *Key) ValidTimes(delim string) []time.Time {
|
||||
return k.ValidTimesFormat(time.RFC3339, delim)
|
||||
}
|
||||
|
||||
// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
|
||||
func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
|
||||
return k.parseFloat64s(k.Strings(delim), false, true)
|
||||
}
|
||||
|
||||
// StrictInts returns list of int divided by given delimiter or error on first invalid input.
|
||||
func (k *Key) StrictInts(delim string) ([]int, error) {
|
||||
return k.parseInts(k.Strings(delim), false, true)
|
||||
}
|
||||
|
||||
// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
|
||||
func (k *Key) StrictInt64s(delim string) ([]int64, error) {
|
||||
return k.parseInt64s(k.Strings(delim), false, true)
|
||||
}
|
||||
|
||||
// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
|
||||
func (k *Key) StrictUints(delim string) ([]uint, error) {
|
||||
return k.parseUints(k.Strings(delim), false, true)
|
||||
}
|
||||
|
||||
// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
|
||||
func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
|
||||
return k.parseUint64s(k.Strings(delim), false, true)
|
||||
}
|
||||
|
||||
// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
|
||||
// or error on first invalid input.
|
||||
func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
|
||||
return k.parseTimesFormat(format, k.Strings(delim), false, true)
|
||||
}
|
||||
|
||||
// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
|
||||
// or error on first invalid input.
|
||||
func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
|
||||
return k.StrictTimesFormat(time.RFC3339, delim)
|
||||
}
|
||||
|
||||
// parseFloat64s transforms strings to float64s.
|
||||
func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) {
|
||||
vals := make([]float64, 0, len(strs))
|
||||
for _, str := range strs {
|
||||
val, err := strconv.ParseFloat(str, 64)
|
||||
if err != nil && returnOnInvalid {
|
||||
return nil, err
|
||||
}
|
||||
if err == nil || addInvalid {
|
||||
vals = append(vals, val)
|
||||
}
|
||||
}
|
||||
return vals, nil
|
||||
}
|
||||
|
||||
// parseInts transforms strings to ints.
|
||||
func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) {
|
||||
vals := make([]int, 0, len(strs))
|
||||
for _, str := range strs {
|
||||
valInt64, err := strconv.ParseInt(str, 0, 64)
|
||||
val := int(valInt64)
|
||||
if err != nil && returnOnInvalid {
|
||||
return nil, err
|
||||
}
|
||||
if err == nil || addInvalid {
|
||||
vals = append(vals, val)
|
||||
}
|
||||
}
|
||||
return vals, nil
|
||||
}
|
||||
|
||||
// parseInt64s transforms strings to int64s.
|
||||
func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) {
|
||||
vals := make([]int64, 0, len(strs))
|
||||
for _, str := range strs {
|
||||
val, err := strconv.ParseInt(str, 0, 64)
|
||||
if err != nil && returnOnInvalid {
|
||||
return nil, err
|
||||
}
|
||||
if err == nil || addInvalid {
|
||||
vals = append(vals, val)
|
||||
}
|
||||
}
|
||||
return vals, nil
|
||||
}
|
||||
|
||||
// parseUints transforms strings to uints.
|
||||
func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) {
|
||||
vals := make([]uint, 0, len(strs))
|
||||
for _, str := range strs {
|
||||
val, err := strconv.ParseUint(str, 0, 0)
|
||||
if err != nil && returnOnInvalid {
|
||||
return nil, err
|
||||
}
|
||||
if err == nil || addInvalid {
|
||||
vals = append(vals, uint(val))
|
||||
}
|
||||
}
|
||||
return vals, nil
|
||||
}
|
||||
|
||||
// parseUint64s transforms strings to uint64s.
|
||||
func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
|
||||
vals := make([]uint64, 0, len(strs))
|
||||
for _, str := range strs {
|
||||
val, err := strconv.ParseUint(str, 0, 64)
|
||||
if err != nil && returnOnInvalid {
|
||||
return nil, err
|
||||
}
|
||||
if err == nil || addInvalid {
|
||||
vals = append(vals, val)
|
||||
}
|
||||
}
|
||||
return vals, nil
|
||||
}
|
||||
|
||||
// parseTimesFormat transforms strings to times in given format.
|
||||
func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
|
||||
vals := make([]time.Time, 0, len(strs))
|
||||
for _, str := range strs {
|
||||
val, err := time.Parse(format, str)
|
||||
if err != nil && returnOnInvalid {
|
||||
return nil, err
|
||||
}
|
||||
if err == nil || addInvalid {
|
||||
vals = append(vals, val)
|
||||
}
|
||||
}
|
||||
return vals, nil
|
||||
}
|
||||
|
||||
// SetValue changes key value.
|
||||
func (k *Key) SetValue(v string) {
|
||||
if k.s.f.BlockMode {
|
||||
k.s.f.lock.Lock()
|
||||
defer k.s.f.lock.Unlock()
|
||||
}
|
||||
|
||||
k.value = v
|
||||
k.s.keysHash[k.name] = v
|
||||
}
|
487 vendor/gopkg.in/ini.v1/parser.go (generated, vendored, normal file)
@@ -0,0 +1,487 @@
|
||||
// Copyright 2015 Unknwon
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package ini
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
var pythonMultiline = regexp.MustCompile("^(\\s+)([^\n]+)")
|
||||
|
||||
type parserOptions struct {
|
||||
IgnoreContinuation bool
|
||||
IgnoreInlineComment bool
|
||||
AllowPythonMultilineValues bool
|
||||
SpaceBeforeInlineComment bool
|
||||
UnescapeValueDoubleQuotes bool
|
||||
UnescapeValueCommentSymbols bool
|
||||
PreserveSurroundedQuote bool
|
||||
}
|
||||
|
||||
type parser struct {
|
||||
buf *bufio.Reader
|
||||
options parserOptions
|
||||
|
||||
isEOF bool
|
||||
count int
|
||||
comment *bytes.Buffer
|
||||
}
|
||||
|
||||
func newParser(r io.Reader, opts parserOptions) *parser {
|
||||
return &parser{
|
||||
buf: bufio.NewReader(r),
|
||||
options: opts,
|
||||
count: 1,
|
||||
comment: &bytes.Buffer{},
|
||||
}
|
||||
}
|
||||
|
||||
// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format.
|
||||
// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
|
||||
func (p *parser) BOM() error {
|
||||
mask, err := p.buf.Peek(2)
|
||||
if err != nil && err != io.EOF {
|
||||
return err
|
||||
} else if len(mask) < 2 {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch {
|
||||
case mask[0] == 254 && mask[1] == 255:
|
||||
fallthrough
|
||||
case mask[0] == 255 && mask[1] == 254:
|
||||
p.buf.Read(mask)
|
||||
case mask[0] == 239 && mask[1] == 187:
|
||||
mask, err := p.buf.Peek(3)
|
||||
if err != nil && err != io.EOF {
|
||||
return err
|
||||
} else if len(mask) < 3 {
|
||||
return nil
|
||||
}
|
||||
if mask[2] == 191 {
|
||||
p.buf.Read(mask)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *parser) readUntil(delim byte) ([]byte, error) {
|
||||
data, err := p.buf.ReadBytes(delim)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
p.isEOF = true
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func cleanComment(in []byte) ([]byte, bool) {
|
||||
i := bytes.IndexAny(in, "#;")
|
||||
if i == -1 {
|
||||
return nil, false
|
||||
}
|
||||
return in[i:], true
|
||||
}
|
||||
|
||||
func readKeyName(delimiters string, in []byte) (string, int, error) {
|
||||
line := string(in)
|
||||
|
||||
// Check if key name surrounded by quotes.
|
||||
var keyQuote string
|
||||
if line[0] == '"' {
|
||||
if len(line) > 6 && string(line[0:3]) == `"""` {
|
||||
keyQuote = `"""`
|
||||
} else {
|
||||
keyQuote = `"`
|
||||
}
|
||||
} else if line[0] == '`' {
|
||||
keyQuote = "`"
|
||||
}
|
||||
|
||||
// Get out key name
|
||||
endIdx := -1
|
||||
if len(keyQuote) > 0 {
|
||||
startIdx := len(keyQuote)
|
||||
// FIXME: fail case -> """"""name"""=value
|
||||
pos := strings.Index(line[startIdx:], keyQuote)
|
||||
if pos == -1 {
|
||||
return "", -1, fmt.Errorf("missing closing key quote: %s", line)
|
||||
}
|
||||
pos += startIdx
|
||||
|
||||
// Find key-value delimiter
|
||||
i := strings.IndexAny(line[pos+startIdx:], delimiters)
|
||||
if i < 0 {
|
||||
return "", -1, ErrDelimiterNotFound{line}
|
||||
}
|
||||
endIdx = pos + i
|
||||
return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
|
||||
}
|
||||
|
||||
endIdx = strings.IndexAny(line, delimiters)
|
||||
if endIdx < 0 {
|
||||
return "", -1, ErrDelimiterNotFound{line}
|
||||
}
|
||||
return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
|
||||
}
|
||||
|
||||
func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
|
||||
for {
|
||||
data, err := p.readUntil('\n')
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
next := string(data)
|
||||
|
||||
pos := strings.LastIndex(next, valQuote)
|
||||
if pos > -1 {
|
||||
val += next[:pos]
|
||||
|
||||
comment, has := cleanComment([]byte(next[pos:]))
|
||||
if has {
|
||||
p.comment.Write(bytes.TrimSpace(comment))
|
||||
}
|
||||
break
|
||||
}
|
||||
val += next
|
||||
if p.isEOF {
|
||||
return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
|
||||
}
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
func (p *parser) readContinuationLines(val string) (string, error) {
|
||||
for {
|
||||
data, err := p.readUntil('\n')
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
next := strings.TrimSpace(string(data))
|
||||
|
||||
if len(next) == 0 {
|
||||
break
|
||||
}
|
||||
val += next
|
||||
if val[len(val)-1] != '\\' {
|
||||
break
|
||||
}
|
||||
val = val[:len(val)-1]
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// hasSurroundedQuote checks if and only if the first and last characters
|
||||
// are quotes \" or \'.
|
||||
// It returns false if any other parts also contain same kind of quotes.
|
||||
func hasSurroundedQuote(in string, quote byte) bool {
|
||||
return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote &&
|
||||
strings.IndexByte(in[1:], quote) == len(in)-2
|
||||
}
|
||||
|
||||
func (p *parser) readValue(in []byte, bufferSize int) (string, error) {
|
||||
|
||||
line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
|
||||
if len(line) == 0 {
|
||||
if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' {
|
||||
return p.readPythonMultilines(line, bufferSize)
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
var valQuote string
|
||||
if len(line) > 3 && string(line[0:3]) == `"""` {
|
||||
valQuote = `"""`
|
||||
} else if line[0] == '`' {
|
||||
valQuote = "`"
|
||||
} else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' {
|
||||
valQuote = `"`
|
||||
}
|
||||
|
||||
if len(valQuote) > 0 {
|
||||
startIdx := len(valQuote)
|
||||
pos := strings.LastIndex(line[startIdx:], valQuote)
|
||||
// Check for multi-line value
|
||||
if pos == -1 {
|
||||
return p.readMultilines(line, line[startIdx:], valQuote)
|
||||
}
|
||||
|
||||
if p.options.UnescapeValueDoubleQuotes && valQuote == `"` {
|
||||
return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil
|
||||
}
|
||||
return line[startIdx : pos+startIdx], nil
|
||||
}
|
||||
|
||||
lastChar := line[len(line)-1]
|
||||
// Won't be able to reach here if value only contains whitespace
|
||||
line = strings.TrimSpace(line)
|
||||
trimmedLastChar := line[len(line)-1]
|
||||
|
||||
// Check continuation lines when desired
|
||||
if !p.options.IgnoreContinuation && trimmedLastChar == '\\' {
|
||||
return p.readContinuationLines(line[:len(line)-1])
|
||||
}
|
||||
|
||||
// Check if ignore inline comment
|
||||
if !p.options.IgnoreInlineComment {
|
||||
var i int
|
||||
if p.options.SpaceBeforeInlineComment {
|
||||
i = strings.Index(line, " #")
|
||||
if i == -1 {
|
||||
i = strings.Index(line, " ;")
|
||||
}
|
||||
|
||||
} else {
|
||||
i = strings.IndexAny(line, "#;")
|
||||
}
|
||||
|
||||
if i > -1 {
|
||||
p.comment.WriteString(line[i:])
|
||||
line = strings.TrimSpace(line[:i])
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Trim single and double quotes
|
||||
if (hasSurroundedQuote(line, '\'') ||
|
||||
hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote {
|
||||
line = line[1 : len(line)-1]
|
||||
} else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols {
|
||||
if strings.Contains(line, `\;`) {
|
||||
line = strings.Replace(line, `\;`, ";", -1)
|
||||
}
|
||||
if strings.Contains(line, `\#`) {
|
||||
line = strings.Replace(line, `\#`, "#", -1)
|
||||
}
|
||||
} else if p.options.AllowPythonMultilineValues && lastChar == '\n' {
|
||||
return p.readPythonMultilines(line, bufferSize)
|
||||
}
|
||||
|
||||
return line, nil
|
||||
}
|
||||
|
||||
func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) {
|
||||
parserBufferPeekResult, _ := p.buf.Peek(bufferSize)
|
||||
peekBuffer := bytes.NewBuffer(parserBufferPeekResult)
|
||||
|
||||
for {
|
||||
peekData, peekErr := peekBuffer.ReadBytes('\n')
|
||||
if peekErr != nil {
|
||||
if peekErr == io.EOF {
|
||||
return line, nil
|
||||
}
|
||||
return "", peekErr
|
||||
}
|
||||
|
||||
peekMatches := pythonMultiline.FindStringSubmatch(string(peekData))
|
||||
if len(peekMatches) != 3 {
|
||||
return line, nil
|
||||
}
|
||||
|
||||
// NOTE: Return if not a python-ini multi-line value.
|
||||
currentIdentSize := len(peekMatches[1])
|
||||
if currentIdentSize <= 0 {
|
||||
return line, nil
|
||||
}
|
||||
|
||||
// NOTE: Just advance the parser reader (buffer) in-sync with the peek buffer.
|
||||
_, err := p.readUntil('\n')
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
line += fmt.Sprintf("\n%s", peekMatches[2])
|
||||
}
|
||||
}
|
||||
|
||||
// parse parses data through an io.Reader.
|
||||
func (f *File) parse(reader io.Reader) (err error) {
|
||||
p := newParser(reader, parserOptions{
|
||||
IgnoreContinuation: f.options.IgnoreContinuation,
|
||||
IgnoreInlineComment: f.options.IgnoreInlineComment,
|
||||
AllowPythonMultilineValues: f.options.AllowPythonMultilineValues,
|
||||
SpaceBeforeInlineComment: f.options.SpaceBeforeInlineComment,
|
||||
UnescapeValueDoubleQuotes: f.options.UnescapeValueDoubleQuotes,
|
||||
UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols,
|
||||
PreserveSurroundedQuote: f.options.PreserveSurroundedQuote,
|
||||
})
|
||||
if err = p.BOM(); err != nil {
|
||||
return fmt.Errorf("BOM: %v", err)
|
||||
}
|
||||
|
||||
// Ignore error because default section name is never empty string.
|
||||
name := DefaultSection
|
||||
if f.options.Insensitive {
|
||||
name = strings.ToLower(DefaultSection)
|
||||
}
|
||||
section, _ := f.NewSection(name)
|
||||
|
||||
// This "last" is not strictly equivalent to "previous one" if current key is not the first nested key
|
||||
var isLastValueEmpty bool
|
||||
var lastRegularKey *Key
|
||||
|
||||
var line []byte
|
||||
var inUnparseableSection bool
|
||||
|
||||
// NOTE: Iterate and increase `currentPeekSize` until
|
||||
// the size of the parser buffer is found.
|
||||
// TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`.
|
||||
parserBufferSize := 0
|
||||
// NOTE: Peek 1kb at a time.
|
||||
currentPeekSize := 1024
|
||||
|
||||
if f.options.AllowPythonMultilineValues {
|
||||
for {
|
||||
peekBytes, _ := p.buf.Peek(currentPeekSize)
|
||||
peekBytesLength := len(peekBytes)
|
||||
|
||||
if parserBufferSize >= peekBytesLength {
|
||||
break
|
||||
}
|
||||
|
||||
currentPeekSize *= 2
|
||||
parserBufferSize = peekBytesLength
|
||||
}
|
||||
}
|
||||
|
||||
for !p.isEOF {
|
||||
line, err = p.readUntil('\n')
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if f.options.AllowNestedValues &&
|
||||
isLastValueEmpty && len(line) > 0 {
|
||||
if line[0] == ' ' || line[0] == '\t' {
|
||||
lastRegularKey.addNestedValue(string(bytes.TrimSpace(line)))
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
line = bytes.TrimLeftFunc(line, unicode.IsSpace)
|
||||
if len(line) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Comments
|
||||
if line[0] == '#' || line[0] == ';' {
|
||||
// Note: we do not care about the ending line break,
|
||||
// it is needed for adding second line,
|
||||
// so just clean it once at the end when set to value.
|
||||
p.comment.Write(line)
|
||||
continue
|
||||
}
|
||||
|
||||
// Section
|
||||
if line[0] == '[' {
|
||||
// Read to the next ']' (TODO: support quoted strings)
|
||||
closeIdx := bytes.LastIndexByte(line, ']')
|
||||
if closeIdx == -1 {
|
||||
return fmt.Errorf("unclosed section: %s", line)
|
||||
}
|
||||
|
||||
name := string(line[1:closeIdx])
|
||||
section, err = f.NewSection(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
comment, has := cleanComment(line[closeIdx+1:])
|
||||
if has {
|
||||
p.comment.Write(comment)
|
||||
}
|
||||
|
||||
section.Comment = strings.TrimSpace(p.comment.String())
|
||||
|
||||
// Reset auto-counter and comments
|
||||
p.comment.Reset()
|
||||
p.count = 1
|
||||
|
||||
inUnparseableSection = false
|
||||
for i := range f.options.UnparseableSections {
|
||||
if f.options.UnparseableSections[i] == name ||
|
||||
(f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) {
|
||||
inUnparseableSection = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if inUnparseableSection {
|
||||
section.isRawSection = true
|
||||
section.rawBody += string(line)
|
||||
continue
|
||||
}
|
||||
|
||||
kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line)
|
||||
if err != nil {
|
||||
// Treat as boolean key when desired, and whole line is key name.
|
||||
if IsErrDelimiterNotFound(err) {
|
||||
switch {
|
||||
case f.options.AllowBooleanKeys:
|
||||
kname, err := p.readValue(line, parserBufferSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key, err := section.NewBooleanKey(kname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key.Comment = strings.TrimSpace(p.comment.String())
|
||||
p.comment.Reset()
|
||||
continue
|
||||
|
||||
case f.options.SkipUnrecognizableLines:
|
||||
continue
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Auto increment.
|
||||
isAutoIncr := false
|
||||
if kname == "-" {
|
||||
isAutoIncr = true
|
||||
kname = "#" + strconv.Itoa(p.count)
|
||||
p.count++
|
||||
}
|
||||
|
||||
value, err := p.readValue(line[offset:], parserBufferSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
isLastValueEmpty = len(value) == 0
|
||||
|
||||
key, err := section.NewKey(kname, value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key.isAutoIncrement = isAutoIncr
|
||||
key.Comment = strings.TrimSpace(p.comment.String())
|
||||
p.comment.Reset()
|
||||
lastRegularKey = key
|
||||
}
|
||||
return nil
|
||||
}
|
256 vendor/gopkg.in/ini.v1/section.go (generated, vendored, normal file)
@@ -0,0 +1,256 @@
|
||||
// Copyright 2014 Unknwon
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package ini
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Section represents a config section.
|
||||
type Section struct {
|
||||
f *File
|
||||
Comment string
|
||||
name string
|
||||
keys map[string]*Key
|
||||
keyList []string
|
||||
keysHash map[string]string
|
||||
|
||||
isRawSection bool
|
||||
rawBody string
|
||||
}
|
||||
|
||||
func newSection(f *File, name string) *Section {
|
||||
return &Section{
|
||||
f: f,
|
||||
name: name,
|
||||
keys: make(map[string]*Key),
|
||||
keyList: make([]string, 0, 10),
|
||||
keysHash: make(map[string]string),
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns name of Section.
|
||||
func (s *Section) Name() string {
|
||||
return s.name
|
||||
}
|
||||
|
||||
// Body returns rawBody of Section if the section was marked as unparseable.
|
||||
// It still follows the other rules of the INI format surrounding leading/trailing whitespace.
|
||||
func (s *Section) Body() string {
|
||||
return strings.TrimSpace(s.rawBody)
|
||||
}
|
||||
|
||||
// SetBody updates body content only if section is raw.
|
||||
func (s *Section) SetBody(body string) {
|
||||
if !s.isRawSection {
|
||||
return
|
||||
}
|
||||
s.rawBody = body
|
||||
}
|
||||
|
||||
// NewKey creates a new key to given section.
|
||||
func (s *Section) NewKey(name, val string) (*Key, error) {
|
||||
if len(name) == 0 {
|
||||
return nil, errors.New("error creating new key: empty key name")
|
||||
} else if s.f.options.Insensitive {
|
||||
name = strings.ToLower(name)
|
||||
}
|
||||
|
||||
if s.f.BlockMode {
|
||||
s.f.lock.Lock()
|
||||
defer s.f.lock.Unlock()
|
||||
}
|
||||
|
||||
if inSlice(name, s.keyList) {
|
||||
if s.f.options.AllowShadows {
|
||||
if err := s.keys[name].addShadow(val); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
s.keys[name].value = val
|
||||
s.keysHash[name] = val
|
||||
}
|
||||
return s.keys[name], nil
|
||||
}
|
||||
|
||||
s.keyList = append(s.keyList, name)
|
||||
s.keys[name] = newKey(s, name, val)
|
||||
s.keysHash[name] = val
|
||||
return s.keys[name], nil
|
||||
}
|
||||
|
||||
// NewBooleanKey creates a new boolean type key to given section.
|
||||
func (s *Section) NewBooleanKey(name string) (*Key, error) {
|
||||
key, err := s.NewKey(name, "true")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
key.isBooleanType = true
|
||||
return key, nil
|
||||
}
|
||||
|
||||
// GetKey returns key in section by given name.
|
||||
func (s *Section) GetKey(name string) (*Key, error) {
|
||||
if s.f.BlockMode {
|
||||
s.f.lock.RLock()
|
||||
}
|
||||
if s.f.options.Insensitive {
|
||||
name = strings.ToLower(name)
|
||||
}
|
||||
key := s.keys[name]
|
||||
if s.f.BlockMode {
|
||||
s.f.lock.RUnlock()
|
||||
}
|
||||
|
||||
if key == nil {
|
||||
// Check if it is a child-section.
|
||||
sname := s.name
|
||||
for {
|
||||
if i := strings.LastIndex(sname, "."); i > -1 {
|
||||
sname = sname[:i]
|
||||
sec, err := s.f.GetSection(sname)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
return sec.GetKey(name)
|
||||
}
|
||||
break
|
||||
}
|
||||
return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name)
|
||||
}
|
||||
return key, nil
|
||||
}
|
||||
|
||||
// HasKey returns true if section contains a key with given name.
|
||||
func (s *Section) HasKey(name string) bool {
|
||||
key, _ := s.GetKey(name)
|
||||
return key != nil
|
||||
}
|
||||
|
||||
// Deprecated: Use "HasKey" instead.
|
||||
func (s *Section) Haskey(name string) bool {
|
||||
return s.HasKey(name)
|
||||
}
|
||||
|
||||
// HasValue returns true if section contains given raw value.
|
||||
func (s *Section) HasValue(value string) bool {
|
||||
if s.f.BlockMode {
|
||||
s.f.lock.RLock()
|
||||
defer s.f.lock.RUnlock()
|
||||
}
|
||||
|
||||
for _, k := range s.keys {
|
||||
if value == k.value {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Key assumes named Key exists in section and returns a zero-value when not.
|
||||
func (s *Section) Key(name string) *Key {
|
||||
key, err := s.GetKey(name)
|
||||
if err != nil {
|
||||
// It's OK here because the only possible error is empty key name,
|
||||
// but if it's empty, this piece of code won't be executed.
|
||||
key, _ = s.NewKey(name, "")
|
||||
return key
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
// Keys returns list of keys of section.
|
||||
func (s *Section) Keys() []*Key {
|
||||
keys := make([]*Key, len(s.keyList))
|
||||
for i := range s.keyList {
|
||||
keys[i] = s.Key(s.keyList[i])
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
// ParentKeys returns list of keys of parent section.
|
||||
func (s *Section) ParentKeys() []*Key {
|
||||
var parentKeys []*Key
|
||||
sname := s.name
|
||||
for {
|
||||
if i := strings.LastIndex(sname, "."); i > -1 {
|
||||
sname = sname[:i]
|
||||
sec, err := s.f.GetSection(sname)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
parentKeys = append(parentKeys, sec.Keys()...)
|
||||
} else {
|
||||
break
|
||||
}
|
||||
|
||||
}
|
||||
return parentKeys
|
||||
}
|
||||
|
||||
// KeyStrings returns list of key names of section.
|
||||
func (s *Section) KeyStrings() []string {
|
||||
list := make([]string, len(s.keyList))
|
||||
copy(list, s.keyList)
|
||||
return list
|
||||
}
|
||||
|
||||
// KeysHash returns keys hash consisting of names and values.
|
||||
func (s *Section) KeysHash() map[string]string {
|
||||
if s.f.BlockMode {
|
||||
s.f.lock.RLock()
|
||||
defer s.f.lock.RUnlock()
|
||||
}
|
||||
|
||||
hash := map[string]string{}
|
||||
for key, value := range s.keysHash {
|
||||
hash[key] = value
|
||||
}
|
||||
return hash
|
||||
}
|
||||
|
||||
// DeleteKey deletes a key from section.
|
||||
func (s *Section) DeleteKey(name string) {
|
||||
if s.f.BlockMode {
|
||||
s.f.lock.Lock()
|
||||
defer s.f.lock.Unlock()
|
||||
}
|
||||
|
||||
for i, k := range s.keyList {
|
||||
if k == name {
|
||||
s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
|
||||
delete(s.keys, name)
|
||||
delete(s.keysHash, name)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ChildSections returns a list of child sections of current section.
|
||||
// For example, "[parent.child1]" and "[parent.child12]" are child sections
|
||||
// of section "[parent]".
|
||||
func (s *Section) ChildSections() []*Section {
|
||||
prefix := s.name + "."
|
||||
children := make([]*Section, 0, 3)
|
||||
for _, name := range s.f.sectionList {
|
||||
if strings.HasPrefix(name, prefix) {
|
||||
children = append(children, s.f.sections[name])
|
||||
}
|
||||
}
|
||||
return children
|
||||
}
|
548 vendor/gopkg.in/ini.v1/struct.go (generated, vendored, normal file)
@@ -0,0 +1,548 @@
|
||||
// Copyright 2014 Unknwon
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package ini
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// NameMapper represents an ini tag name mapper.
|
||||
type NameMapper func(string) string
|
||||
|
||||
// Built-in name getters.
|
||||
var (
|
||||
// AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
|
||||
AllCapsUnderscore NameMapper = func(raw string) string {
|
||||
newstr := make([]rune, 0, len(raw))
|
||||
for i, chr := range raw {
|
||||
if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
|
||||
if i > 0 {
|
||||
newstr = append(newstr, '_')
|
||||
}
|
||||
}
|
||||
newstr = append(newstr, unicode.ToUpper(chr))
|
||||
}
|
||||
return string(newstr)
|
||||
}
|
||||
// TitleUnderscore converts to format title_underscore.
|
||||
TitleUnderscore NameMapper = func(raw string) string {
|
||||
newstr := make([]rune, 0, len(raw))
|
||||
for i, chr := range raw {
|
||||
if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
|
||||
if i > 0 {
|
||||
newstr = append(newstr, '_')
|
||||
}
|
||||
chr -= ('A' - 'a')
|
||||
}
|
||||
newstr = append(newstr, chr)
|
||||
}
|
||||
return string(newstr)
|
||||
}
|
||||
)
|
||||
|
||||
func (s *Section) parseFieldName(raw, actual string) string {
|
||||
if len(actual) > 0 {
|
||||
return actual
|
||||
}
|
||||
if s.f.NameMapper != nil {
|
||||
return s.f.NameMapper(raw)
|
||||
}
|
||||
return raw
|
||||
}
|
||||
|
||||
func parseDelim(actual string) string {
|
||||
if len(actual) > 0 {
|
||||
return actual
|
||||
}
|
||||
return ","
|
||||
}
|
||||
|
||||
var reflectTime = reflect.TypeOf(time.Now()).Kind()
|
||||
|
||||
// setSliceWithProperType sets proper values to slice based on its type.
|
||||
func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
|
||||
var strs []string
|
||||
if allowShadow {
|
||||
strs = key.StringsWithShadows(delim)
|
||||
} else {
|
||||
strs = key.Strings(delim)
|
||||
}
|
||||
|
||||
numVals := len(strs)
|
||||
if numVals == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var vals interface{}
|
||||
var err error
|
||||
|
||||
sliceOf := field.Type().Elem().Kind()
|
||||
switch sliceOf {
|
||||
case reflect.String:
|
||||
vals = strs
|
||||
case reflect.Int:
|
||||
vals, err = key.parseInts(strs, true, false)
|
||||
case reflect.Int64:
|
||||
vals, err = key.parseInt64s(strs, true, false)
|
||||
case reflect.Uint:
|
||||
vals, err = key.parseUints(strs, true, false)
|
||||
case reflect.Uint64:
|
||||
vals, err = key.parseUint64s(strs, true, false)
|
||||
case reflect.Float64:
|
||||
vals, err = key.parseFloat64s(strs, true, false)
|
||||
case reflectTime:
|
||||
vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false)
|
||||
default:
|
||||
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
|
||||
}
|
||||
if err != nil && isStrict {
|
||||
return err
|
||||
}
|
||||
|
||||
slice := reflect.MakeSlice(field.Type(), numVals, numVals)
|
||||
for i := 0; i < numVals; i++ {
|
||||
switch sliceOf {
|
||||
case reflect.String:
|
||||
slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
|
||||
case reflect.Int:
|
||||
slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
|
||||
case reflect.Int64:
|
||||
slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
|
||||
case reflect.Uint:
|
||||
slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
|
||||
case reflect.Uint64:
|
||||
slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
|
||||
case reflect.Float64:
|
||||
slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
|
||||
case reflectTime:
|
||||
slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
|
||||
}
|
||||
}
|
||||
field.Set(slice)
|
||||
return nil
|
||||
}
|
||||
|
||||
func wrapStrictError(err error, isStrict bool) error {
|
||||
if isStrict {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// setWithProperType sets proper value to field based on its type,
|
||||
// but it does not return error for failing parsing,
|
||||
// because we want to use default value that is already assigned to struct.
|
||||
func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
if len(key.String()) == 0 {
|
||||
return nil
|
||||
}
|
||||
field.SetString(key.String())
|
||||
case reflect.Bool:
|
||||
boolVal, err := key.Bool()
|
||||
if err != nil {
|
||||
return wrapStrictError(err, isStrict)
|
||||
}
|
||||
field.SetBool(boolVal)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
durationVal, err := key.Duration()
|
||||
// Skip zero value
|
||||
if err == nil && int64(durationVal) > 0 {
|
||||
field.Set(reflect.ValueOf(durationVal))
|
||||
return nil
|
||||
}
|
||||
|
||||
intVal, err := key.Int64()
|
||||
if err != nil {
|
||||
return wrapStrictError(err, isStrict)
|
||||
}
|
||||
field.SetInt(intVal)
|
||||
// byte is an alias for uint8, so supporting uint8 breaks support for byte
|
||||
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
durationVal, err := key.Duration()
|
||||
// Skip zero value
|
||||
if err == nil && uint64(durationVal) > 0 {
|
||||
field.Set(reflect.ValueOf(durationVal))
|
||||
return nil
|
||||
}
|
||||
|
||||
uintVal, err := key.Uint64()
|
||||
if err != nil {
|
||||
return wrapStrictError(err, isStrict)
|
||||
}
|
||||
field.SetUint(uintVal)
|
||||
|
||||
case reflect.Float32, reflect.Float64:
|
||||
floatVal, err := key.Float64()
|
||||
if err != nil {
|
||||
return wrapStrictError(err, isStrict)
|
||||
}
|
||||
field.SetFloat(floatVal)
|
||||
case reflectTime:
|
||||
timeVal, err := key.Time()
|
||||
if err != nil {
|
||||
return wrapStrictError(err, isStrict)
|
||||
}
|
||||
field.Set(reflect.ValueOf(timeVal))
|
||||
case reflect.Slice:
|
||||
return setSliceWithProperType(key, field, delim, allowShadow, isStrict)
|
||||
default:
|
||||
return fmt.Errorf("unsupported type '%s'", t)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) {
|
||||
opts := strings.SplitN(tag, ",", 3)
|
||||
rawName = opts[0]
|
||||
if len(opts) > 1 {
|
||||
omitEmpty = opts[1] == "omitempty"
|
||||
}
|
||||
if len(opts) > 2 {
|
||||
allowShadow = opts[2] == "allowshadow"
|
||||
}
|
||||
return rawName, omitEmpty, allowShadow
|
||||
}
|
||||
|
||||
func (s *Section) mapTo(val reflect.Value, isStrict bool) error {
|
||||
if val.Kind() == reflect.Ptr {
|
||||
val = val.Elem()
|
||||
}
|
||||
typ := val.Type()
|
||||
|
||||
for i := 0; i < typ.NumField(); i++ {
|
||||
field := val.Field(i)
|
||||
tpField := typ.Field(i)
|
||||
|
||||
tag := tpField.Tag.Get("ini")
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
rawName, _, allowShadow := parseTagOptions(tag)
|
||||
fieldName := s.parseFieldName(tpField.Name, rawName)
|
||||
if len(fieldName) == 0 || !field.CanSet() {
|
||||
continue
|
||||
}
|
||||
|
||||
isStruct := tpField.Type.Kind() == reflect.Struct
|
||||
isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct
|
||||
isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
|
||||
if isAnonymous {
|
||||
field.Set(reflect.New(tpField.Type.Elem()))
|
||||
}
|
||||
|
||||
if isAnonymous || isStruct || isStructPtr {
|
||||
if sec, err := s.f.GetSection(fieldName); err == nil {
|
||||
// Only set the field to non-nil struct value if we have
|
||||
// a section for it. Otherwise, we end up with a non-nil
|
||||
// struct ptr even though there is no data.
|
||||
if isStructPtr && field.IsNil() {
|
||||
field.Set(reflect.New(tpField.Type.Elem()))
|
||||
}
|
||||
if err = sec.mapTo(field, isStrict); err != nil {
|
||||
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if key, err := s.GetKey(fieldName); err == nil {
|
||||
delim := parseDelim(tpField.Tag.Get("delim"))
|
||||
if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil {
|
||||
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MapTo maps section to given struct.
|
||||
func (s *Section) MapTo(v interface{}) error {
|
||||
typ := reflect.TypeOf(v)
|
||||
val := reflect.ValueOf(v)
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
typ = typ.Elem()
|
||||
val = val.Elem()
|
||||
} else {
|
||||
return errors.New("cannot map to non-pointer struct")
|
||||
}
|
||||
|
||||
return s.mapTo(val, false)
|
||||
}
|
||||
|
||||
// StrictMapTo maps section to given struct in strict mode,
|
||||
// which returns all possible error including value parsing error.
|
||||
func (s *Section) StrictMapTo(v interface{}) error {
|
||||
typ := reflect.TypeOf(v)
|
||||
val := reflect.ValueOf(v)
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
typ = typ.Elem()
|
||||
val = val.Elem()
|
||||
} else {
|
||||
return errors.New("cannot map to non-pointer struct")
|
||||
}
|
||||
|
||||
return s.mapTo(val, true)
|
||||
}
|
||||
|
||||
// MapTo maps file to given struct.
|
||||
func (f *File) MapTo(v interface{}) error {
|
||||
return f.Section("").MapTo(v)
|
||||
}
|
||||
|
||||
// StrictMapTo maps file to given struct in strict mode,
|
||||
// which returns all possible error including value parsing error.
|
||||
func (f *File) StrictMapTo(v interface{}) error {
|
||||
return f.Section("").StrictMapTo(v)
|
||||
}
|
||||
|
||||
// MapToWithMapper maps data sources to given struct with name mapper.
|
||||
func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
|
||||
cfg, err := Load(source, others...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cfg.NameMapper = mapper
|
||||
return cfg.MapTo(v)
|
||||
}
|
||||
|
||||
// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode,
|
||||
// which returns all possible error including value parsing error.
|
||||
func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
|
||||
cfg, err := Load(source, others...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cfg.NameMapper = mapper
|
||||
return cfg.StrictMapTo(v)
|
||||
}
|
||||
|
||||
// MapTo maps data sources to given struct.
|
||||
func MapTo(v, source interface{}, others ...interface{}) error {
|
||||
return MapToWithMapper(v, nil, source, others...)
|
||||
}
|
||||
|
||||
// StrictMapTo maps data sources to given struct in strict mode,
|
||||
// which returns all possible error including value parsing error.
|
||||
func StrictMapTo(v, source interface{}, others ...interface{}) error {
|
||||
return StrictMapToWithMapper(v, nil, source, others...)
|
||||
}
|
||||
|
||||
// reflectSliceWithProperType does the opposite of setSliceWithProperType.
|
||||
func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error {
|
||||
slice := field.Slice(0, field.Len())
|
||||
if field.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
sliceOf := field.Type().Elem().Kind()
|
||||
|
||||
if allowShadow {
|
||||
var keyWithShadows *Key
|
||||
for i := 0; i < field.Len(); i++ {
|
||||
var val string
|
||||
switch sliceOf {
|
||||
case reflect.String:
|
||||
val = slice.Index(i).String()
|
||||
case reflect.Int, reflect.Int64:
|
||||
val = fmt.Sprint(slice.Index(i).Int())
|
||||
case reflect.Uint, reflect.Uint64:
|
||||
val = fmt.Sprint(slice.Index(i).Uint())
|
||||
case reflect.Float64:
|
||||
val = fmt.Sprint(slice.Index(i).Float())
|
||||
case reflectTime:
|
||||
val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339)
|
||||
default:
|
||||
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
|
||||
}
|
||||
|
||||
if i == 0 {
|
||||
keyWithShadows = newKey(key.s, key.name, val)
|
||||
} else {
|
||||
keyWithShadows.AddShadow(val)
|
||||
}
|
||||
}
|
||||
key = keyWithShadows
|
||||
return nil
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
for i := 0; i < field.Len(); i++ {
|
||||
switch sliceOf {
|
||||
case reflect.String:
|
||||
buf.WriteString(slice.Index(i).String())
|
||||
case reflect.Int, reflect.Int64:
|
||||
buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
|
||||
case reflect.Uint, reflect.Uint64:
|
||||
buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
|
||||
case reflect.Float64:
|
||||
buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
|
||||
case reflectTime:
|
||||
buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
|
||||
default:
|
||||
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
|
||||
}
|
||||
buf.WriteString(delim)
|
||||
}
|
||||
key.SetValue(buf.String()[:buf.Len()-len(delim)])
|
||||
return nil
|
||||
}
|
||||
|
||||
// reflectWithProperType does the opposite of setWithProperType.
|
||||
func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error {
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
key.SetValue(field.String())
|
||||
case reflect.Bool:
|
||||
key.SetValue(fmt.Sprint(field.Bool()))
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
key.SetValue(fmt.Sprint(field.Int()))
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
key.SetValue(fmt.Sprint(field.Uint()))
|
||||
case reflect.Float32, reflect.Float64:
|
||||
key.SetValue(fmt.Sprint(field.Float()))
|
||||
case reflectTime:
|
||||
key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
|
||||
case reflect.Slice:
|
||||
return reflectSliceWithProperType(key, field, delim, allowShadow)
|
||||
default:
|
||||
return fmt.Errorf("unsupported type '%s'", t)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CR: copied from encoding/json/encode.go with modifications of time.Time support.
|
||||
// TODO: add more test coverage.
|
||||
func isEmptyValue(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.Len() == 0
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
return v.IsNil()
|
||||
case reflectTime:
|
||||
t, ok := v.Interface().(time.Time)
|
||||
return ok && t.IsZero()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *Section) reflectFrom(val reflect.Value) error {
|
||||
if val.Kind() == reflect.Ptr {
|
||||
val = val.Elem()
|
||||
}
|
||||
typ := val.Type()
|
||||
|
||||
for i := 0; i < typ.NumField(); i++ {
|
||||
field := val.Field(i)
|
||||
tpField := typ.Field(i)
|
||||
|
||||
tag := tpField.Tag.Get("ini")
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
rawName, omitEmpty, allowShadow := parseTagOptions(tag)
|
||||
if omitEmpty && isEmptyValue(field) {
|
||||
continue
|
||||
}
|
||||
|
||||
fieldName := s.parseFieldName(tpField.Name, rawName)
|
||||
if len(fieldName) == 0 || !field.CanSet() {
|
||||
continue
|
||||
}
|
||||
|
||||
if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
|
||||
(tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
|
||||
// Note: The only error here is section doesn't exist.
|
||||
sec, err := s.f.GetSection(fieldName)
|
||||
if err != nil {
|
||||
// Note: fieldName can never be empty here, ignore error.
|
||||
sec, _ = s.f.NewSection(fieldName)
|
||||
}
|
||||
|
||||
// Add comment from comment tag
|
||||
if len(sec.Comment) == 0 {
|
||||
sec.Comment = tpField.Tag.Get("comment")
|
||||
}
|
||||
|
||||
if err = sec.reflectFrom(field); err != nil {
|
||||
return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Note: Same reason as section.
|
||||
key, err := s.GetKey(fieldName)
|
||||
if err != nil {
|
||||
key, _ = s.NewKey(fieldName, "")
|
||||
}
|
||||
|
||||
// Add comment from comment tag
|
||||
if len(key.Comment) == 0 {
|
||||
key.Comment = tpField.Tag.Get("comment")
|
||||
}
|
||||
|
||||
if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim")), allowShadow); err != nil {
|
||||
return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReflectFrom reflects section from given struct.
|
||||
func (s *Section) ReflectFrom(v interface{}) error {
|
||||
typ := reflect.TypeOf(v)
|
||||
val := reflect.ValueOf(v)
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
typ = typ.Elem()
|
||||
val = val.Elem()
|
||||
} else {
|
||||
return errors.New("cannot reflect from non-pointer struct")
|
||||
}
|
||||
|
||||
return s.reflectFrom(val)
|
||||
}
|
||||
|
||||
// ReflectFrom reflects file from given struct.
|
||||
func (f *File) ReflectFrom(v interface{}) error {
|
||||
return f.Section("").ReflectFrom(v)
|
||||
}
|
||||
|
||||
// ReflectFromWithMapper reflects data sources from given struct with name mapper.
|
||||
func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
|
||||
cfg.NameMapper = mapper
|
||||
return cfg.ReflectFrom(v)
|
||||
}
|
||||
|
||||
// ReflectFrom reflects data sources from given struct.
|
||||
func ReflectFrom(cfg *File, v interface{}) error {
|
||||
return ReflectFromWithMapper(cfg, v, nil)
|
||||
}
|
6 vendor/modules.txt (vendored, normal file)
@@ -0,0 +1,6 @@
# github.com/influxdata/influxdb1-client v0.0.0-20190402204710-8ff2fc3824fc
github.com/influxdata/influxdb1-client
github.com/influxdata/influxdb1-client/models
github.com/influxdata/influxdb1-client/pkg/escape
# gopkg.in/ini.v1 v1.44.0
gopkg.in/ini.v1
45 weather.go
@@ -2,11 +2,10 @@ package main

import (
	"flag"
	"fmt"
	"log"
	"time"

	_ "github.com/influxdata/influxdb1-client"
	client "github.com/influxdata/influxdb1-client/v2"
)

var err error
@@ -20,36 +19,20 @@ func main() {

	flag.StringVar(&configpath, "configfile", "common.ini", "config file to use with fuelprices section")
	flag.Parse()
	GetConfig(configpath, &wc)

	httpClient, err := client.NewHTTPClient(client.HTTPConfig{
		Addr:     fmt.Sprintf("http://%s:%d", wc.InfluxHost, wc.InfluxPort),
		Username: wc.InfluxUser,
		Password: wc.InfluxPass,
	})
	HandleFatalError(err)
	var data []Data

	bp, err := client.NewBatchPoints(client.BatchPointsConfig{
		Database: wc.InfluxDB,
	})
	HandleFatalError(err)

	Get

	for _, p := range *prices {

		tags := map[string]string{"pdv": p.ID, "fuel": p.Fuel}
		fields := map[string]interface{}{"value": p.Amount}

		point, _ := client.NewPoint(
			wc.InfluxTable,
			tags,
			fields,
			now,
		)

		bp.AddPoint(point)
		err = httpClient.Write(bp)
		HandleError(err)
	err := GetConfig(configpath, &wc)
	if err != nil {
		log.Fatal(err)
	}

	for _, city := range wc.WeatherCities {
		d, _ := FetchData(city)
		data = append(data, d)
	}

	// bp, err := client.NewBatchPoints(client.BatchPointsConfig{
	// 	Database: wc.InfluxDB,
	// })
}