updates master branch
commit ed2dce5f92

go.mod (2 changed lines)
@@ -5,8 +5,8 @@ go 1.14
require (
    github.com/antchfx/htmlquery v1.0.0
    github.com/antchfx/xpath v1.0.0 // indirect
    github.com/go-sql-driver/mysql v1.4.1 // indirect
    github.com/gobuffalo/packr/v2 v2.8.0
    github.com/jinzhu/gorm v1.9.12
    github.com/jmoiron/sqlx v1.2.0
    github.com/labstack/echo/v4 v4.1.16
    github.com/lib/pq v1.6.0
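The gorm v1.9.12 requirement above, the go.sum checksum changes, and the new vendor/github.com/jinzhu/gorm sources later in this commit all belong to the same dependency update. A minimal sketch of the Go toolchain invocation that normally produces all three together, assuming it is run at the repository root:

    go get github.com/jinzhu/gorm@v1.9.12
    go mod vendor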
go.sum (19 changed lines)
Generated checksum updates for the dependency changes in go.mod (entries for github.com/jinzhu/gorm v1.9.12, github.com/lib/pq v1.6.0, github.com/mattn/go-sqlite3, github.com/denisenkom/go-mssqldb, golang.org/x/crypto, google.golang.org/appengine and related transitive modules); full checksum diff omitted.
settings.json (new file, 7 lines)
@@ -0,0 +1,7 @@
{
    "files.watcherExclude": {
        ".git/objects/**": true,
        ".git/subtree-cache/**": true,
        "vendor/**": true
    }
}
@@ -40,8 +40,7 @@ func (config *Config) GetConfig() error {
    config.Cron = qrzsection.Key("cron").MustString("@every 1h")

    config.DbSchema = fmt.Sprintf(`
    CREATE TABLE IF NOT EXISTS %s
    (
    CREATE TABLE IF NOT EXISTS %[1]s (
        id serial NOT NULL PRIMARY KEY,
        qrz varchar(25) NOT NULL,
        name varchar(25) DEFAULT NULL,
@@ -51,23 +50,47 @@ func (config *Config) GetConfig() error {
        dept varchar(50) DEFAULT NULL,
        country varchar(25) DEFAULT NULL,
        dmrid varchar(25) DEFAULT NULL,
        fts tsvector NULL,
        CONSTRAINT qrz_unique UNIQUE(qrz, name, city, dept, country)
    )`,
    );

    CREATE INDEX IF NOT EXISTS fts_idx ON %[1]s (fts);

    CREATE OR REPLACE FUNCTION updateFTS() RETURNS trigger AS $fts_trigger$
    BEGIN
        IF pg_trigger_depth() <> 1 THEN
            RETURN NEW;
        END IF;
        UPDATE %[1]s SET fts = to_tsvector(concat_ws(' ', qrz, name, city, dept, country)) WHERE id = NEW.id;
        RETURN NEW;
    END;
    $fts_trigger$ LANGUAGE plpgsql;

    DROP TRIGGER IF EXISTS fts_trigger ON %[1]s;

    CREATE TRIGGER fts_trigger
    AFTER INSERT ON %[1]s
    FOR EACH ROW EXECUTE PROCEDURE updateFTS();
    `,
        config.DbTable)

    config.DbStatements.Insert = fmt.Sprintf(
        `INSERT INTO %s (qrz, dmrid, name, city, dept, country)
        VALUES ($1, $2, $3, $4, $5, $6);`,
        config.DbTable)
    config.DbStatements.ExportCSV = fmt.Sprintf(
        `SELECT qrz, name, city, dept, country
        FROM %s;`,
    config.DbStatements.Insert = fmt.Sprintf(`
    INSERT INTO %[1]s (qrz, dmrid, name, city, dept, country)
    VALUES ($1, $2, $3, $4, $5, $6);
    `,
        config.DbTable)

    config.DbStatements.Countries = fmt.Sprintf(
        `SELECT country
        FROM %s
        GROUP BY country;`,
    config.DbStatements.ExportCSV = fmt.Sprintf(`
    SELECT qrz, name, city, dept, country
    FROM %[1]s;
    `,
        config.DbTable)

    config.DbStatements.Countries = fmt.Sprintf(`
    SELECT country
    FROM %[1]s
    GROUP BY country;
    `,
        config.DbTable)

    config.URLBase = `http://groupe-frs.hamstation.eu/index_qrz_liste_%s.php`
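The reworked schema adds an fts tsvector column, an index on it, and an AFTER INSERT trigger that fills the column with to_tsvector(concat_ws(' ', qrz, name, city, dept, country)). A minimal sketch of how a client would query that column, assuming a literal table name qrz and a placeholder DSN (both are illustrative assumptions; the real values come from the application's ini configuration):

    package main

    import (
        "database/sql"
        "fmt"
        "log"

        "github.com/jmoiron/sqlx"
        _ "github.com/lib/pq"
    )

    type row struct {
        Qrz     string         `db:"qrz"`
        Name    sql.NullString `db:"name"`
        Country sql.NullString `db:"country"`
    }

    func main() {
        // Hypothetical DSN for illustration only.
        db, err := sqlx.Connect("postgres", "host=localhost dbname=qrz sslmode=disable")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        // plainto_tsquery matches against the trigger-maintained tsvector column.
        var results []row
        err = db.Select(&results, "SELECT qrz, name, country FROM qrz WHERE fts @@ plainto_tsquery($1)", "paris")
        if err != nil {
            log.Fatal(err)
        }
        for _, r := range results {
            fmt.Println(r.Qrz, r.Name.String, r.Country.String)
        }
    }

The fts @@ plainto_tsquery(...) predicate here has the same shape as the one produced by SetSearchFTSStatement further down in this commit.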
@@ -8,9 +8,11 @@ import (
    "net/http"
    "regexp"
    "strings"
    "time"

    "git.paulbsd.com/paulbsd/qrz/src/config"
    "github.com/antchfx/htmlquery"
    "github.com/jinzhu/gorm"
    "github.com/robfig/cron"
)

@@ -172,4 +174,19 @@ type FrsEntry struct {
    ZipCode sql.NullString `db:"zipcode"`
    Dept    sql.NullString `db:"dept"`
    Country sql.NullString `db:"country"`
    Fts     sql.NullString `db:"fts"`
}

// Qrz is the structure of qrz entries
type Qrz struct {
    gorm.Model
    Name         string
    Age          sql.NullInt64
    Birthday     *time.Time
    Email        string  `gorm:"type:varchar(100);unique_index"`
    Role         string  `gorm:"size:255"`        // set field size to 255
    MemberNumber *string `gorm:"unique;not null"` // set member number to unique and not null
    Num          int     `gorm:"AUTO_INCREMENT"`  // set num to auto incrementable
    Address      string  `gorm:"index:addr"`      // create index with name `addr` for address
    IgnoreMe     int     `gorm:"-"`               // ignore this field
}
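The new Qrz struct embeds gorm.Model and drives its column definitions purely from the gorm struct tags. A minimal sketch of how such a model is typically opened and migrated with GORM v1; the helper name, DSN and dialect import below are assumptions for illustration, not part of this change:

    import (
        "github.com/jinzhu/gorm"
        _ "github.com/jinzhu/gorm/dialects/postgres" // registers the postgres dialect for gorm.Open
    )

    // openAndMigrate is a hypothetical helper in the same package as Qrz,
    // shown only to illustrate the struct tags above; the DSN is a placeholder.
    func openAndMigrate() (*gorm.DB, error) {
        db, err := gorm.Open("postgres", "host=localhost dbname=qrz sslmode=disable")
        if err != nil {
            return nil, err
        }
        // AutoMigrate creates or extends the table according to the gorm tags
        // (varchar(100) plus unique index on Email, index `addr` on Address, and so on).
        db.AutoMigrate(&Qrz{})
        return db, nil
    }

AutoMigrate in GORM v1 only adds missing tables, columns and indexes; it does not alter or drop existing ones.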
@@ -110,7 +110,7 @@ func BuildQuery(config config.Config, qrzdt DatatableInput) (rows *sqlx.Rows, er
    if err != nil {
        return nil, err
    }
    searchstatement, err = SetSearchStatement(config, qrzdt)
    searchstatement, err = SetSearchLikeStatement(config, qrzdt)
    if err != nil {
        return nil, err
    }
@@ -126,8 +126,8 @@ func BuildQuery(config config.Config, qrzdt DatatableInput) (rows *sqlx.Rows, er
}

// BuildQueryCountFiltered builds query for counting filtered
func BuildQueryCountFiltered(config config.Config, qrzdt DatatableInput) (cnt int, err error) {
    searchstatement, err := SetSearchStatement(config, qrzdt)
func BuildQueryCountFiltered(config config.Config, qrzdt QrzDatatableInput) (cnt int, err error) {
    searchstatement, err := SetSearchLikeStatement(config, qrzdt)

    query := `
    SELECT COUNT(*)
@@ -140,8 +140,11 @@ func BuildQueryCountFiltered(config config.Config, qrzdt DatatableInput) (cnt in
}

// BuildQueryCountTotal builds query for counting totals
func BuildQueryCountTotal(config config.Config, qrzdt DatatableInput) (cnt int, err error) {
    err = config.Db.Get(&cnt, fmt.Sprintf("SELECT COUNT(*) FROM %s;", config.DbTable))
func BuildQueryCountTotal(config config.Config, qrzdt QrzDatatableInput) (cnt int, err error) {
    query := `
    SELECT COUNT(*) FROM %s;
    `
    err = config.Db.Get(&cnt, fmt.Sprintf(query, config.DbTable))

    return
}
@@ -171,7 +174,7 @@ func SetSelectStatement(config config.Config, qrzdt DatatableInput) (selectstate
// SetOrderStatement build the sql order statement part
func SetOrderStatement(config config.Config, qrzdt DatatableInput) (orderstmt string, err error) {
    var orderstmts []string
    colre := regexp.MustCompile(`^[a-z]+$`)
    colre := regexp.MustCompile(`^[A-Za-z0-9]+$`)
    orderre := regexp.MustCompile(`^(ASC|asc|DESC|desc)$`)
    for _, col := range qrzdt.Order {
        if colre.MatchString(qrzdt.Columns[col.Column].Name) && orderre.MatchString(col.Dir) {
@@ -202,8 +205,8 @@ func SetLimitStatement(config config.Config, qrzdt DatatableInput) (limitstmt st
    return
}

// SetSearchStatement build the where clause in sql statement
func SetSearchStatement(config config.Config, qrzdt DatatableInput) (searchstmt string, err error) {
// SetSearchLikeStatement build the where clause in sql statement
func SetSearchLikeStatement(config config.Config, qrzdt QrzDatatableInput) (searchstmt string, err error) {
    var searchstmtslice []string
    if len(qrzdt.Columns) > 0 {
        for id, i := range qrzdt.Columns {
@@ -220,6 +223,16 @@ func SetSearchStatement(config config.Config, qrzdt DatatableInput) (searchstmt
    return
}

// SetSearchFTSStatement build the where clause in sql statement
func SetSearchFTSStatement(config config.Config, qrzdt QrzDatatableInput) (searchstmt string, err error) {
    if len(qrzdt.Columns) > 0 {
        searchstmt = fmt.Sprintf("fts @@ plainto_tsquery('%s')", qrzdt.Search.Value)
    } else {
        searchstmt = "1=1"
    }
    return
}

// RunCSVExport runs the main loop
func RunCSVExport(c echo.Context, config config.Config) (data []byte, mime string, err error) {
    mime = "text/csv"
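BuildQuery and BuildQueryCountFiltered now go through the renamed SetSearchLikeStatement, the new SetSearchFTSStatement emits a fts @@ plainto_tsquery('<search value>') predicate (or 1=1 when no columns are submitted), and the ORDER BY column whitelist is widened from ^[a-z]+$ to ^[A-Za-z0-9]+$. A small self-contained check of what that widened pattern accepts (illustrative only):

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        colre := regexp.MustCompile(`^[A-Za-z0-9]+$`) // the new whitelist from SetOrderStatement
        for _, name := range []string{"qrz", "dmrId", "col1", "name;DROP TABLE qrz"} {
            // Only plain alphanumeric column names may reach the ORDER BY clause.
            fmt.Printf("%-22q allowed=%v\n", name, colre.MatchString(name))
        }
    }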
@@ -3,22 +3,14 @@ body
    font-family: "Helvetica Neue", "HelveticaNeue", "Helvetica", "Arial", "sans-serif";
}

.dataTables_length
#main_table_length
{
    font-size: 18px;
    float: left;
}

.dataTables_filter
#main_table_filter
{
    font-size: 18px;
    float: right;
}

#main_table
{
    margin-top: 200px;
}

td
{
    width: 1000px;
}
#main_table_paginate
static/css/materialize.min.css (new file, 13 lines, vendored)
File diff suppressed because one or more lines are too long
@@ -5,6 +5,7 @@
    <link rel="stylesheet" type="text/css" href="static/css/jquery.dataTables.min.css" media="screen">
    <link rel="stylesheet" type="text/css" href="static/css/font-awesome/all.css">
    <link rel="stylesheet" type="text/css" href="static/css/bootstrap.min.css">
    <link rel="stylesheet" type="text/css" href="static/css/dataTables.bootstrap4.min.css">

    <script type="text/javascript" src="static/js/jquery.js"></script>
    <script type="text/javascript" src="static/js/jquery.dataTables.min.js"></script>
@@ -19,7 +20,7 @@
    <h1 class="jumbotron-heading">FRS QRZ database</h1>
    <p>Mirror of <a href="http://groupe-frs.hamstation.eu">http://groupe-frs.hamstation.eu</a> list of users</p>
    <div>
    <table id="main_table" class="display cell-border">
    <table id="main_table" class="display table table-hover">
        <thead class="thead-dark">
            <tr>
                <td>QRZ</td>
vendor/github.com/jinzhu/gorm/.gitignore (new file, 3 lines, generated, vendored)
@@ -0,0 +1,3 @@
documents
coverage.txt
_book
vendor/github.com/jinzhu/gorm/License (new file, 21 lines, generated, vendored)
File diff suppressed (standard MIT License text, Copyright (c) 2013-NOW Jinzhu <wosmvp@gmail.com>).
vendor/github.com/jinzhu/gorm/README.md (new file, 41 lines, generated, vendored)
File diff suppressed (upstream GORM README: badges, feature overview, getting-started and license links).
vendor/github.com/jinzhu/gorm/association.go (new file, 377 lines, generated, vendored)
File diff suppressed (vendored GORM source: association mode with Find, Append, Replace, Delete, Clear, Count and the saveAssociations helper).
vendor/github.com/jinzhu/gorm/callback.go (new file, 250 lines, generated, vendored)
File diff suppressed (vendored GORM source: callback registry and processor ordering for create, update, delete, query and row_query callbacks).
vendor/github.com/jinzhu/gorm/callback_create.go (new file, 197 lines, generated, vendored)
File diff suppressed (vendored GORM source: create callbacks covering timestamps, INSERT generation, last-insert-id handling and reload of defaulted columns).
vendor/github.com/jinzhu/gorm/callback_delete.go (new file, 63 lines, generated, vendored)
File diff suppressed (vendored GORM source: delete callbacks, including soft delete via the DeletedAt field).
109 vendor/github.com/jinzhu/gorm/callback_query.go generated vendored Normal file
@ -0,0 +1,109 @@
package gorm

import (
	"errors"
	"fmt"
	"reflect"
)

// Define callbacks for querying
func init() {
	DefaultCallback.Query().Register("gorm:query", queryCallback)
	DefaultCallback.Query().Register("gorm:preload", preloadCallback)
	DefaultCallback.Query().Register("gorm:after_query", afterQueryCallback)
}

// queryCallback used to query data from database
func queryCallback(scope *Scope) {
	if _, skip := scope.InstanceGet("gorm:skip_query_callback"); skip {
		return
	}

	//we are only preloading relations, dont touch base model
	if _, skip := scope.InstanceGet("gorm:only_preload"); skip {
		return
	}

	defer scope.trace(NowFunc())

	var (
		isSlice, isPtr bool
		resultType     reflect.Type
		results        = scope.IndirectValue()
	)

	if orderBy, ok := scope.Get("gorm:order_by_primary_key"); ok {
		if primaryField := scope.PrimaryField(); primaryField != nil {
			scope.Search.Order(fmt.Sprintf("%v.%v %v", scope.QuotedTableName(), scope.Quote(primaryField.DBName), orderBy))
		}
	}

	if value, ok := scope.Get("gorm:query_destination"); ok {
		results = indirect(reflect.ValueOf(value))
	}

	if kind := results.Kind(); kind == reflect.Slice {
		isSlice = true
		resultType = results.Type().Elem()
		results.Set(reflect.MakeSlice(results.Type(), 0, 0))

		if resultType.Kind() == reflect.Ptr {
			isPtr = true
			resultType = resultType.Elem()
		}
	} else if kind != reflect.Struct {
		scope.Err(errors.New("unsupported destination, should be slice or struct"))
		return
	}

	scope.prepareQuerySQL()

	if !scope.HasError() {
		scope.db.RowsAffected = 0

		if str, ok := scope.Get("gorm:query_hint"); ok {
			scope.SQL = fmt.Sprint(str) + scope.SQL
		}

		if str, ok := scope.Get("gorm:query_option"); ok {
			scope.SQL += addExtraSpaceIfExist(fmt.Sprint(str))
		}

		if rows, err := scope.SQLDB().Query(scope.SQL, scope.SQLVars...); scope.Err(err) == nil {
			defer rows.Close()

			columns, _ := rows.Columns()
			for rows.Next() {
				scope.db.RowsAffected++

				elem := results
				if isSlice {
					elem = reflect.New(resultType).Elem()
				}

				scope.scan(rows, columns, scope.New(elem.Addr().Interface()).Fields())

				if isSlice {
					if isPtr {
						results.Set(reflect.Append(results, elem.Addr()))
					} else {
						results.Set(reflect.Append(results, elem))
					}
				}
			}

			if err := rows.Err(); err != nil {
				scope.Err(err)
			} else if scope.db.RowsAffected == 0 && !isSlice {
				scope.Err(ErrRecordNotFound)
			}
		}
	}
}

// afterQueryCallback will invoke `AfterFind` method after querying
func afterQueryCallback(scope *Scope) {
	if !scope.HasError() {
		scope.CallMethod("AfterFind")
	}
}
410 vendor/github.com/jinzhu/gorm/callback_query_preload.go generated vendored Normal file
@ -0,0 +1,410 @@
|
||||
package gorm
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// preloadCallback used to preload associations
|
||||
func preloadCallback(scope *Scope) {
|
||||
if _, skip := scope.InstanceGet("gorm:skip_query_callback"); skip {
|
||||
return
|
||||
}
|
||||
|
||||
if ap, ok := scope.Get("gorm:auto_preload"); ok {
|
||||
// If gorm:auto_preload IS NOT a bool then auto preload.
|
||||
// Else if it IS a bool, use the value
|
||||
if apb, ok := ap.(bool); !ok {
|
||||
autoPreload(scope)
|
||||
} else if apb {
|
||||
autoPreload(scope)
|
||||
}
|
||||
}
|
||||
|
||||
if scope.Search.preload == nil || scope.HasError() {
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
preloadedMap = map[string]bool{}
|
||||
fields = scope.Fields()
|
||||
)
|
||||
|
||||
for _, preload := range scope.Search.preload {
|
||||
var (
|
||||
preloadFields = strings.Split(preload.schema, ".")
|
||||
currentScope = scope
|
||||
currentFields = fields
|
||||
)
|
||||
|
||||
for idx, preloadField := range preloadFields {
|
||||
var currentPreloadConditions []interface{}
|
||||
|
||||
if currentScope == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// if not preloaded
|
||||
if preloadKey := strings.Join(preloadFields[:idx+1], "."); !preloadedMap[preloadKey] {
|
||||
|
||||
// assign search conditions to last preload
|
||||
if idx == len(preloadFields)-1 {
|
||||
currentPreloadConditions = preload.conditions
|
||||
}
|
||||
|
||||
for _, field := range currentFields {
|
||||
if field.Name != preloadField || field.Relationship == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
switch field.Relationship.Kind {
|
||||
case "has_one":
|
||||
currentScope.handleHasOnePreload(field, currentPreloadConditions)
|
||||
case "has_many":
|
||||
currentScope.handleHasManyPreload(field, currentPreloadConditions)
|
||||
case "belongs_to":
|
||||
currentScope.handleBelongsToPreload(field, currentPreloadConditions)
|
||||
case "many_to_many":
|
||||
currentScope.handleManyToManyPreload(field, currentPreloadConditions)
|
||||
default:
|
||||
scope.Err(errors.New("unsupported relation"))
|
||||
}
|
||||
|
||||
preloadedMap[preloadKey] = true
|
||||
break
|
||||
}
|
||||
|
||||
if !preloadedMap[preloadKey] {
|
||||
scope.Err(fmt.Errorf("can't preload field %s for %s", preloadField, currentScope.GetModelStruct().ModelType))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// preload next level
|
||||
if idx < len(preloadFields)-1 {
|
||||
currentScope = currentScope.getColumnAsScope(preloadField)
|
||||
if currentScope != nil {
|
||||
currentFields = currentScope.Fields()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func autoPreload(scope *Scope) {
|
||||
for _, field := range scope.Fields() {
|
||||
if field.Relationship == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if val, ok := field.TagSettingsGet("PRELOAD"); ok {
|
||||
if preload, err := strconv.ParseBool(val); err != nil {
|
||||
scope.Err(errors.New("invalid preload option"))
|
||||
return
|
||||
} else if !preload {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
scope.Search.Preload(field.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func (scope *Scope) generatePreloadDBWithConditions(conditions []interface{}) (*DB, []interface{}) {
|
||||
var (
|
||||
preloadDB = scope.NewDB()
|
||||
preloadConditions []interface{}
|
||||
)
|
||||
|
||||
for _, condition := range conditions {
|
||||
if scopes, ok := condition.(func(*DB) *DB); ok {
|
||||
preloadDB = scopes(preloadDB)
|
||||
} else {
|
||||
preloadConditions = append(preloadConditions, condition)
|
||||
}
|
||||
}
|
||||
|
||||
return preloadDB, preloadConditions
|
||||
}
|
||||
|
||||
// handleHasOnePreload used to preload has one associations
|
||||
func (scope *Scope) handleHasOnePreload(field *Field, conditions []interface{}) {
|
||||
relation := field.Relationship
|
||||
|
||||
// get relations's primary keys
|
||||
primaryKeys := scope.getColumnAsArray(relation.AssociationForeignFieldNames, scope.Value)
|
||||
if len(primaryKeys) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// preload conditions
|
||||
preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions)
|
||||
|
||||
// find relations
|
||||
query := fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.ForeignDBNames), toQueryMarks(primaryKeys))
|
||||
values := toQueryValues(primaryKeys)
|
||||
if relation.PolymorphicType != "" {
|
||||
query += fmt.Sprintf(" AND %v = ?", scope.Quote(relation.PolymorphicDBName))
|
||||
values = append(values, relation.PolymorphicValue)
|
||||
}
|
||||
|
||||
results := makeSlice(field.Struct.Type)
|
||||
scope.Err(preloadDB.Where(query, values...).Find(results, preloadConditions...).Error)
|
||||
|
||||
// assign find results
|
||||
var (
|
||||
resultsValue = indirect(reflect.ValueOf(results))
|
||||
indirectScopeValue = scope.IndirectValue()
|
||||
)
|
||||
|
||||
if indirectScopeValue.Kind() == reflect.Slice {
|
||||
foreignValuesToResults := make(map[string]reflect.Value)
|
||||
for i := 0; i < resultsValue.Len(); i++ {
|
||||
result := resultsValue.Index(i)
|
||||
foreignValues := toString(getValueFromFields(result, relation.ForeignFieldNames))
|
||||
foreignValuesToResults[foreignValues] = result
|
||||
}
|
||||
for j := 0; j < indirectScopeValue.Len(); j++ {
|
||||
indirectValue := indirect(indirectScopeValue.Index(j))
|
||||
valueString := toString(getValueFromFields(indirectValue, relation.AssociationForeignFieldNames))
|
||||
if result, found := foreignValuesToResults[valueString]; found {
|
||||
indirectValue.FieldByName(field.Name).Set(result)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < resultsValue.Len(); i++ {
|
||||
result := resultsValue.Index(i)
|
||||
scope.Err(field.Set(result))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handleHasManyPreload used to preload has many associations
|
||||
func (scope *Scope) handleHasManyPreload(field *Field, conditions []interface{}) {
|
||||
relation := field.Relationship
|
||||
|
||||
// get relations's primary keys
|
||||
primaryKeys := scope.getColumnAsArray(relation.AssociationForeignFieldNames, scope.Value)
|
||||
if len(primaryKeys) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// preload conditions
|
||||
preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions)
|
||||
|
||||
// find relations
|
||||
query := fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.ForeignDBNames), toQueryMarks(primaryKeys))
|
||||
values := toQueryValues(primaryKeys)
|
||||
if relation.PolymorphicType != "" {
|
||||
query += fmt.Sprintf(" AND %v = ?", scope.Quote(relation.PolymorphicDBName))
|
||||
values = append(values, relation.PolymorphicValue)
|
||||
}
|
||||
|
||||
results := makeSlice(field.Struct.Type)
|
||||
scope.Err(preloadDB.Where(query, values...).Find(results, preloadConditions...).Error)
|
||||
|
||||
// assign find results
|
||||
var (
|
||||
resultsValue = indirect(reflect.ValueOf(results))
|
||||
indirectScopeValue = scope.IndirectValue()
|
||||
)
|
||||
|
||||
if indirectScopeValue.Kind() == reflect.Slice {
|
||||
preloadMap := make(map[string][]reflect.Value)
|
||||
for i := 0; i < resultsValue.Len(); i++ {
|
||||
result := resultsValue.Index(i)
|
||||
foreignValues := getValueFromFields(result, relation.ForeignFieldNames)
|
||||
preloadMap[toString(foreignValues)] = append(preloadMap[toString(foreignValues)], result)
|
||||
}
|
||||
|
||||
for j := 0; j < indirectScopeValue.Len(); j++ {
|
||||
object := indirect(indirectScopeValue.Index(j))
|
||||
objectRealValue := getValueFromFields(object, relation.AssociationForeignFieldNames)
|
||||
f := object.FieldByName(field.Name)
|
||||
if results, ok := preloadMap[toString(objectRealValue)]; ok {
|
||||
f.Set(reflect.Append(f, results...))
|
||||
} else {
|
||||
f.Set(reflect.MakeSlice(f.Type(), 0, 0))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
scope.Err(field.Set(resultsValue))
|
||||
}
|
||||
}
|
||||
|
||||
// handleBelongsToPreload used to preload belongs to associations
|
||||
func (scope *Scope) handleBelongsToPreload(field *Field, conditions []interface{}) {
|
||||
relation := field.Relationship
|
||||
|
||||
// preload conditions
|
||||
preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions)
|
||||
|
||||
// get relations's primary keys
|
||||
primaryKeys := scope.getColumnAsArray(relation.ForeignFieldNames, scope.Value)
|
||||
if len(primaryKeys) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// find relations
|
||||
results := makeSlice(field.Struct.Type)
|
||||
scope.Err(preloadDB.Where(fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.AssociationForeignDBNames), toQueryMarks(primaryKeys)), toQueryValues(primaryKeys)...).Find(results, preloadConditions...).Error)
|
||||
|
||||
// assign find results
|
||||
var (
|
||||
resultsValue = indirect(reflect.ValueOf(results))
|
||||
indirectScopeValue = scope.IndirectValue()
|
||||
)
|
||||
|
||||
foreignFieldToObjects := make(map[string][]*reflect.Value)
|
||||
if indirectScopeValue.Kind() == reflect.Slice {
|
||||
for j := 0; j < indirectScopeValue.Len(); j++ {
|
||||
object := indirect(indirectScopeValue.Index(j))
|
||||
valueString := toString(getValueFromFields(object, relation.ForeignFieldNames))
|
||||
foreignFieldToObjects[valueString] = append(foreignFieldToObjects[valueString], &object)
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < resultsValue.Len(); i++ {
|
||||
result := resultsValue.Index(i)
|
||||
if indirectScopeValue.Kind() == reflect.Slice {
|
||||
valueString := toString(getValueFromFields(result, relation.AssociationForeignFieldNames))
|
||||
if objects, found := foreignFieldToObjects[valueString]; found {
|
||||
for _, object := range objects {
|
||||
object.FieldByName(field.Name).Set(result)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
scope.Err(field.Set(result))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handleManyToManyPreload used to preload many to many associations
|
||||
func (scope *Scope) handleManyToManyPreload(field *Field, conditions []interface{}) {
|
||||
var (
|
||||
relation = field.Relationship
|
||||
joinTableHandler = relation.JoinTableHandler
|
||||
fieldType = field.Struct.Type.Elem()
|
||||
foreignKeyValue interface{}
|
||||
foreignKeyType = reflect.ValueOf(&foreignKeyValue).Type()
|
||||
linkHash = map[string][]reflect.Value{}
|
||||
isPtr bool
|
||||
)
|
||||
|
||||
if fieldType.Kind() == reflect.Ptr {
|
||||
isPtr = true
|
||||
fieldType = fieldType.Elem()
|
||||
}
|
||||
|
||||
var sourceKeys = []string{}
|
||||
for _, key := range joinTableHandler.SourceForeignKeys() {
|
||||
sourceKeys = append(sourceKeys, key.DBName)
|
||||
}
|
||||
|
||||
// preload conditions
|
||||
preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions)
|
||||
|
||||
// generate query with join table
|
||||
newScope := scope.New(reflect.New(fieldType).Interface())
|
||||
preloadDB = preloadDB.Table(newScope.TableName()).Model(newScope.Value)
|
||||
|
||||
if len(preloadDB.search.selects) == 0 {
|
||||
preloadDB = preloadDB.Select("*")
|
||||
}
|
||||
|
||||
preloadDB = joinTableHandler.JoinWith(joinTableHandler, preloadDB, scope.Value)
|
||||
|
||||
// preload inline conditions
|
||||
if len(preloadConditions) > 0 {
|
||||
preloadDB = preloadDB.Where(preloadConditions[0], preloadConditions[1:]...)
|
||||
}
|
||||
|
||||
rows, err := preloadDB.Rows()
|
||||
|
||||
if scope.Err(err) != nil {
|
||||
return
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
columns, _ := rows.Columns()
|
||||
for rows.Next() {
|
||||
var (
|
||||
elem = reflect.New(fieldType).Elem()
|
||||
fields = scope.New(elem.Addr().Interface()).Fields()
|
||||
)
|
||||
|
||||
// register foreign keys in join tables
|
||||
var joinTableFields []*Field
|
||||
for _, sourceKey := range sourceKeys {
|
||||
joinTableFields = append(joinTableFields, &Field{StructField: &StructField{DBName: sourceKey, IsNormal: true}, Field: reflect.New(foreignKeyType).Elem()})
|
||||
}
|
||||
|
||||
scope.scan(rows, columns, append(fields, joinTableFields...))
|
||||
|
||||
scope.New(elem.Addr().Interface()).
|
||||
InstanceSet("gorm:skip_query_callback", true).
|
||||
callCallbacks(scope.db.parent.callbacks.queries)
|
||||
|
||||
var foreignKeys = make([]interface{}, len(sourceKeys))
|
||||
// generate hashed forkey keys in join table
|
||||
for idx, joinTableField := range joinTableFields {
|
||||
if !joinTableField.Field.IsNil() {
|
||||
foreignKeys[idx] = joinTableField.Field.Elem().Interface()
|
||||
}
|
||||
}
|
||||
hashedSourceKeys := toString(foreignKeys)
|
||||
|
||||
if isPtr {
|
||||
linkHash[hashedSourceKeys] = append(linkHash[hashedSourceKeys], elem.Addr())
|
||||
} else {
|
||||
linkHash[hashedSourceKeys] = append(linkHash[hashedSourceKeys], elem)
|
||||
}
|
||||
}
|
||||
|
||||
if err := rows.Err(); err != nil {
|
||||
scope.Err(err)
|
||||
}
|
||||
|
||||
// assign find results
|
||||
var (
|
||||
indirectScopeValue = scope.IndirectValue()
|
||||
fieldsSourceMap = map[string][]reflect.Value{}
|
||||
foreignFieldNames = []string{}
|
||||
)
|
||||
|
||||
for _, dbName := range relation.ForeignFieldNames {
|
||||
if field, ok := scope.FieldByName(dbName); ok {
|
||||
foreignFieldNames = append(foreignFieldNames, field.Name)
|
||||
}
|
||||
}
|
||||
|
||||
if indirectScopeValue.Kind() == reflect.Slice {
|
||||
for j := 0; j < indirectScopeValue.Len(); j++ {
|
||||
object := indirect(indirectScopeValue.Index(j))
|
||||
key := toString(getValueFromFields(object, foreignFieldNames))
|
||||
fieldsSourceMap[key] = append(fieldsSourceMap[key], object.FieldByName(field.Name))
|
||||
}
|
||||
} else if indirectScopeValue.IsValid() {
|
||||
key := toString(getValueFromFields(indirectScopeValue, foreignFieldNames))
|
||||
fieldsSourceMap[key] = append(fieldsSourceMap[key], indirectScopeValue.FieldByName(field.Name))
|
||||
}
|
||||
|
||||
for source, fields := range fieldsSourceMap {
|
||||
for _, f := range fields {
|
||||
//If not 0 this means Value is a pointer and we already added preloaded models to it
|
||||
if f.Len() != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
v := reflect.MakeSlice(f.Type(), 0, 0)
|
||||
if len(linkHash[source]) > 0 {
|
||||
v = reflect.Append(f, linkHash[source]...)
|
||||
}
|
||||
|
||||
f.Set(v)
|
||||
}
|
||||
}
|
||||
}
|
41 vendor/github.com/jinzhu/gorm/callback_row_query.go generated vendored Normal file
@ -0,0 +1,41 @@
package gorm

import (
	"database/sql"
	"fmt"
)

// Define callbacks for row query
func init() {
	DefaultCallback.RowQuery().Register("gorm:row_query", rowQueryCallback)
}

type RowQueryResult struct {
	Row *sql.Row
}

type RowsQueryResult struct {
	Rows  *sql.Rows
	Error error
}

// queryCallback used to query data from database
func rowQueryCallback(scope *Scope) {
	if result, ok := scope.InstanceGet("row_query_result"); ok {
		scope.prepareQuerySQL()

		if str, ok := scope.Get("gorm:query_hint"); ok {
			scope.SQL = fmt.Sprint(str) + scope.SQL
		}

		if str, ok := scope.Get("gorm:query_option"); ok {
			scope.SQL += addExtraSpaceIfExist(fmt.Sprint(str))
		}

		if rowResult, ok := result.(*RowQueryResult); ok {
			rowResult.Row = scope.SQLDB().QueryRow(scope.SQL, scope.SQLVars...)
		} else if rowsResult, ok := result.(*RowsQueryResult); ok {
			rowsResult.Rows, rowsResult.Error = scope.SQLDB().Query(scope.SQL, scope.SQLVars...)
		}
	}
}
170 vendor/github.com/jinzhu/gorm/callback_save.go generated vendored Normal file
@ -0,0 +1,170 @@
|
||||
package gorm
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func beginTransactionCallback(scope *Scope) {
|
||||
scope.Begin()
|
||||
}
|
||||
|
||||
func commitOrRollbackTransactionCallback(scope *Scope) {
|
||||
scope.CommitOrRollback()
|
||||
}
|
||||
|
||||
func saveAssociationCheck(scope *Scope, field *Field) (autoUpdate bool, autoCreate bool, saveReference bool, r *Relationship) {
|
||||
checkTruth := func(value interface{}) bool {
|
||||
if v, ok := value.(bool); ok && !v {
|
||||
return false
|
||||
}
|
||||
|
||||
if v, ok := value.(string); ok {
|
||||
v = strings.ToLower(v)
|
||||
return v == "true"
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
if scope.changeableField(field) && !field.IsBlank && !field.IsIgnored {
|
||||
if r = field.Relationship; r != nil {
|
||||
autoUpdate, autoCreate, saveReference = true, true, true
|
||||
|
||||
if value, ok := scope.Get("gorm:save_associations"); ok {
|
||||
autoUpdate = checkTruth(value)
|
||||
autoCreate = autoUpdate
|
||||
saveReference = autoUpdate
|
||||
} else if value, ok := field.TagSettingsGet("SAVE_ASSOCIATIONS"); ok {
|
||||
autoUpdate = checkTruth(value)
|
||||
autoCreate = autoUpdate
|
||||
saveReference = autoUpdate
|
||||
}
|
||||
|
||||
if value, ok := scope.Get("gorm:association_autoupdate"); ok {
|
||||
autoUpdate = checkTruth(value)
|
||||
} else if value, ok := field.TagSettingsGet("ASSOCIATION_AUTOUPDATE"); ok {
|
||||
autoUpdate = checkTruth(value)
|
||||
}
|
||||
|
||||
if value, ok := scope.Get("gorm:association_autocreate"); ok {
|
||||
autoCreate = checkTruth(value)
|
||||
} else if value, ok := field.TagSettingsGet("ASSOCIATION_AUTOCREATE"); ok {
|
||||
autoCreate = checkTruth(value)
|
||||
}
|
||||
|
||||
if value, ok := scope.Get("gorm:association_save_reference"); ok {
|
||||
saveReference = checkTruth(value)
|
||||
} else if value, ok := field.TagSettingsGet("ASSOCIATION_SAVE_REFERENCE"); ok {
|
||||
saveReference = checkTruth(value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func saveBeforeAssociationsCallback(scope *Scope) {
|
||||
for _, field := range scope.Fields() {
|
||||
autoUpdate, autoCreate, saveReference, relationship := saveAssociationCheck(scope, field)
|
||||
|
||||
if relationship != nil && relationship.Kind == "belongs_to" {
|
||||
fieldValue := field.Field.Addr().Interface()
|
||||
newScope := scope.New(fieldValue)
|
||||
|
||||
if newScope.PrimaryKeyZero() {
|
||||
if autoCreate {
|
||||
scope.Err(scope.NewDB().Save(fieldValue).Error)
|
||||
}
|
||||
} else if autoUpdate {
|
||||
scope.Err(scope.NewDB().Save(fieldValue).Error)
|
||||
}
|
||||
|
||||
if saveReference {
|
||||
if len(relationship.ForeignFieldNames) != 0 {
|
||||
// set value's foreign key
|
||||
for idx, fieldName := range relationship.ForeignFieldNames {
|
||||
associationForeignName := relationship.AssociationForeignDBNames[idx]
|
||||
if foreignField, ok := scope.New(fieldValue).FieldByName(associationForeignName); ok {
|
||||
scope.Err(scope.SetColumn(fieldName, foreignField.Field.Interface()))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func saveAfterAssociationsCallback(scope *Scope) {
|
||||
for _, field := range scope.Fields() {
|
||||
autoUpdate, autoCreate, saveReference, relationship := saveAssociationCheck(scope, field)
|
||||
|
||||
if relationship != nil && (relationship.Kind == "has_one" || relationship.Kind == "has_many" || relationship.Kind == "many_to_many") {
|
||||
value := field.Field
|
||||
|
||||
switch value.Kind() {
|
||||
case reflect.Slice:
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
newDB := scope.NewDB()
|
||||
elem := value.Index(i).Addr().Interface()
|
||||
newScope := newDB.NewScope(elem)
|
||||
|
||||
if saveReference {
|
||||
if relationship.JoinTableHandler == nil && len(relationship.ForeignFieldNames) != 0 {
|
||||
for idx, fieldName := range relationship.ForeignFieldNames {
|
||||
associationForeignName := relationship.AssociationForeignDBNames[idx]
|
||||
if f, ok := scope.FieldByName(associationForeignName); ok {
|
||||
scope.Err(newScope.SetColumn(fieldName, f.Field.Interface()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if relationship.PolymorphicType != "" {
|
||||
scope.Err(newScope.SetColumn(relationship.PolymorphicType, relationship.PolymorphicValue))
|
||||
}
|
||||
}
|
||||
|
||||
if newScope.PrimaryKeyZero() {
|
||||
if autoCreate {
|
||||
scope.Err(newDB.Save(elem).Error)
|
||||
}
|
||||
} else if autoUpdate {
|
||||
scope.Err(newDB.Save(elem).Error)
|
||||
}
|
||||
|
||||
if !scope.New(newScope.Value).PrimaryKeyZero() && saveReference {
|
||||
if joinTableHandler := relationship.JoinTableHandler; joinTableHandler != nil {
|
||||
scope.Err(joinTableHandler.Add(joinTableHandler, newDB, scope.Value, newScope.Value))
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
elem := value.Addr().Interface()
|
||||
newScope := scope.New(elem)
|
||||
|
||||
if saveReference {
|
||||
if len(relationship.ForeignFieldNames) != 0 {
|
||||
for idx, fieldName := range relationship.ForeignFieldNames {
|
||||
associationForeignName := relationship.AssociationForeignDBNames[idx]
|
||||
if f, ok := scope.FieldByName(associationForeignName); ok {
|
||||
scope.Err(newScope.SetColumn(fieldName, f.Field.Interface()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if relationship.PolymorphicType != "" {
|
||||
scope.Err(newScope.SetColumn(relationship.PolymorphicType, relationship.PolymorphicValue))
|
||||
}
|
||||
}
|
||||
|
||||
if newScope.PrimaryKeyZero() {
|
||||
if autoCreate {
|
||||
scope.Err(scope.NewDB().Save(elem).Error)
|
||||
}
|
||||
} else if autoUpdate {
|
||||
scope.Err(scope.NewDB().Save(elem).Error)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
121 vendor/github.com/jinzhu/gorm/callback_update.go generated vendored Normal file
@ -0,0 +1,121 @@
package gorm

import (
	"errors"
	"fmt"
	"sort"
	"strings"
)

// Define callbacks for updating
func init() {
	DefaultCallback.Update().Register("gorm:assign_updating_attributes", assignUpdatingAttributesCallback)
	DefaultCallback.Update().Register("gorm:begin_transaction", beginTransactionCallback)
	DefaultCallback.Update().Register("gorm:before_update", beforeUpdateCallback)
	DefaultCallback.Update().Register("gorm:save_before_associations", saveBeforeAssociationsCallback)
	DefaultCallback.Update().Register("gorm:update_time_stamp", updateTimeStampForUpdateCallback)
	DefaultCallback.Update().Register("gorm:update", updateCallback)
	DefaultCallback.Update().Register("gorm:save_after_associations", saveAfterAssociationsCallback)
	DefaultCallback.Update().Register("gorm:after_update", afterUpdateCallback)
	DefaultCallback.Update().Register("gorm:commit_or_rollback_transaction", commitOrRollbackTransactionCallback)
}

// assignUpdatingAttributesCallback assign updating attributes to model
func assignUpdatingAttributesCallback(scope *Scope) {
	if attrs, ok := scope.InstanceGet("gorm:update_interface"); ok {
		if updateMaps, hasUpdate := scope.updatedAttrsWithValues(attrs); hasUpdate {
			scope.InstanceSet("gorm:update_attrs", updateMaps)
		} else {
			scope.SkipLeft()
		}
	}
}

// beforeUpdateCallback will invoke `BeforeSave`, `BeforeUpdate` method before updating
func beforeUpdateCallback(scope *Scope) {
	if scope.DB().HasBlockGlobalUpdate() && !scope.hasConditions() {
		scope.Err(errors.New("missing WHERE clause while updating"))
		return
	}
	if _, ok := scope.Get("gorm:update_column"); !ok {
		if !scope.HasError() {
			scope.CallMethod("BeforeSave")
		}
		if !scope.HasError() {
			scope.CallMethod("BeforeUpdate")
		}
	}
}

// updateTimeStampForUpdateCallback will set `UpdatedAt` when updating
func updateTimeStampForUpdateCallback(scope *Scope) {
	if _, ok := scope.Get("gorm:update_column"); !ok {
		scope.SetColumn("UpdatedAt", scope.db.nowFunc())
	}
}

// updateCallback the callback used to update data to database
func updateCallback(scope *Scope) {
	if !scope.HasError() {
		var sqls []string

		if updateAttrs, ok := scope.InstanceGet("gorm:update_attrs"); ok {
			// Sort the column names so that the generated SQL is the same every time.
			updateMap := updateAttrs.(map[string]interface{})
			var columns []string
			for c := range updateMap {
				columns = append(columns, c)
			}
			sort.Strings(columns)

			for _, column := range columns {
				value := updateMap[column]
				sqls = append(sqls, fmt.Sprintf("%v = %v", scope.Quote(column), scope.AddToVars(value)))
			}
		} else {
			for _, field := range scope.Fields() {
				if scope.changeableField(field) {
					if !field.IsPrimaryKey && field.IsNormal && (field.Name != "CreatedAt" || !field.IsBlank) {
						if !field.IsForeignKey || !field.IsBlank || !field.HasDefaultValue {
							sqls = append(sqls, fmt.Sprintf("%v = %v", scope.Quote(field.DBName), scope.AddToVars(field.Field.Interface())))
						}
					} else if relationship := field.Relationship; relationship != nil && relationship.Kind == "belongs_to" {
						for _, foreignKey := range relationship.ForeignDBNames {
							if foreignField, ok := scope.FieldByName(foreignKey); ok && !scope.changeableField(foreignField) {
								sqls = append(sqls,
									fmt.Sprintf("%v = %v", scope.Quote(foreignField.DBName), scope.AddToVars(foreignField.Field.Interface())))
							}
						}
					}
				}
			}
		}

		var extraOption string
		if str, ok := scope.Get("gorm:update_option"); ok {
			extraOption = fmt.Sprint(str)
		}

		if len(sqls) > 0 {
			scope.Raw(fmt.Sprintf(
				"UPDATE %v SET %v%v%v",
				scope.QuotedTableName(),
				strings.Join(sqls, ", "),
				addExtraSpaceIfExist(scope.CombinedConditionSql()),
				addExtraSpaceIfExist(extraOption),
			)).Exec()
		}
	}
}

// afterUpdateCallback will invoke `AfterUpdate`, `AfterSave` method after updating
func afterUpdateCallback(scope *Scope) {
	if _, ok := scope.Get("gorm:update_column"); !ok {
		if !scope.HasError() {
			scope.CallMethod("AfterUpdate")
		}
		if !scope.HasError() {
			scope.CallMethod("AfterSave")
		}
	}
}
147 vendor/github.com/jinzhu/gorm/dialect.go generated vendored Normal file
@ -0,0 +1,147 @@
package gorm

import (
	"database/sql"
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

// Dialect interface contains behaviors that differ across SQL database
type Dialect interface {
	// GetName get dialect's name
	GetName() string

	// SetDB set db for dialect
	SetDB(db SQLCommon)

	// BindVar return the placeholder for actual values in SQL statements, in many dbs it is "?", Postgres using $1
	BindVar(i int) string
	// Quote quotes field name to avoid SQL parsing exceptions by using a reserved word as a field name
	Quote(key string) string
	// DataTypeOf return data's sql type
	DataTypeOf(field *StructField) string

	// HasIndex check has index or not
	HasIndex(tableName string, indexName string) bool
	// HasForeignKey check has foreign key or not
	HasForeignKey(tableName string, foreignKeyName string) bool
	// RemoveIndex remove index
	RemoveIndex(tableName string, indexName string) error
	// HasTable check has table or not
	HasTable(tableName string) bool
	// HasColumn check has column or not
	HasColumn(tableName string, columnName string) bool
	// ModifyColumn modify column's type
	ModifyColumn(tableName string, columnName string, typ string) error

	// LimitAndOffsetSQL return generated SQL with Limit and Offset, as mssql has special case
	LimitAndOffsetSQL(limit, offset interface{}) (string, error)
	// SelectFromDummyTable return select values, for most dbs, `SELECT values` just works, mysql needs `SELECT value FROM DUAL`
	SelectFromDummyTable() string
	// LastInsertIDOutputInterstitial most dbs support LastInsertId, but mssql needs to use `OUTPUT`
	LastInsertIDOutputInterstitial(tableName, columnName string, columns []string) string
	// LastInsertIdReturningSuffix most dbs support LastInsertId, but postgres needs to use `RETURNING`
	LastInsertIDReturningSuffix(tableName, columnName string) string
	// DefaultValueStr
	DefaultValueStr() string

	// BuildKeyName returns a valid key name (foreign key, index key) for the given table, field and reference
	BuildKeyName(kind, tableName string, fields ...string) string

	// NormalizeIndexAndColumn returns valid index name and column name depending on each dialect
	NormalizeIndexAndColumn(indexName, columnName string) (string, string)

	// CurrentDatabase return current database name
	CurrentDatabase() string
}

var dialectsMap = map[string]Dialect{}

func newDialect(name string, db SQLCommon) Dialect {
	if value, ok := dialectsMap[name]; ok {
		dialect := reflect.New(reflect.TypeOf(value).Elem()).Interface().(Dialect)
		dialect.SetDB(db)
		return dialect
	}

	fmt.Printf("`%v` is not officially supported, running under compatibility mode.\n", name)
	commontDialect := &commonDialect{}
	commontDialect.SetDB(db)
	return commontDialect
}

// RegisterDialect register new dialect
func RegisterDialect(name string, dialect Dialect) {
	dialectsMap[name] = dialect
}

// GetDialect gets the dialect for the specified dialect name
func GetDialect(name string) (dialect Dialect, ok bool) {
	dialect, ok = dialectsMap[name]
	return
}

// ParseFieldStructForDialect get field's sql data type
var ParseFieldStructForDialect = func(field *StructField, dialect Dialect) (fieldValue reflect.Value, sqlType string, size int, additionalType string) {
	// Get redirected field type
	var (
		reflectType = field.Struct.Type
		dataType, _ = field.TagSettingsGet("TYPE")
	)

	for reflectType.Kind() == reflect.Ptr {
		reflectType = reflectType.Elem()
	}

	// Get redirected field value
	fieldValue = reflect.Indirect(reflect.New(reflectType))

	if gormDataType, ok := fieldValue.Interface().(interface {
		GormDataType(Dialect) string
	}); ok {
		dataType = gormDataType.GormDataType(dialect)
	}

	// Get scanner's real value
	if dataType == "" {
		var getScannerValue func(reflect.Value)
		getScannerValue = func(value reflect.Value) {
			fieldValue = value
			if _, isScanner := reflect.New(fieldValue.Type()).Interface().(sql.Scanner); isScanner && fieldValue.Kind() == reflect.Struct {
				getScannerValue(fieldValue.Field(0))
			}
		}
		getScannerValue(fieldValue)
	}

	// Default Size
	if num, ok := field.TagSettingsGet("SIZE"); ok {
		size, _ = strconv.Atoi(num)
	} else {
		size = 255
	}

	// Default type from tag setting
	notNull, _ := field.TagSettingsGet("NOT NULL")
	unique, _ := field.TagSettingsGet("UNIQUE")
	additionalType = notNull + " " + unique
	if value, ok := field.TagSettingsGet("DEFAULT"); ok {
		additionalType = additionalType + " DEFAULT " + value
	}

	if value, ok := field.TagSettingsGet("COMMENT"); ok {
		additionalType = additionalType + " COMMENT " + value
	}

	return fieldValue, dataType, size, strings.TrimSpace(additionalType)
}

func currentDatabaseAndTable(dialect Dialect, tableName string) (string, string) {
	if strings.Contains(tableName, ".") {
		splitStrings := strings.SplitN(tableName, ".", 2)
		return splitStrings[0], splitStrings[1]
	}
	return dialect.CurrentDatabase(), tableName
}
196 vendor/github.com/jinzhu/gorm/dialect_common.go generated vendored Normal file
@ -0,0 +1,196 @@
|
||||
package gorm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var keyNameRegex = regexp.MustCompile("[^a-zA-Z0-9]+")
|
||||
|
||||
// DefaultForeignKeyNamer contains the default foreign key name generator method
|
||||
type DefaultForeignKeyNamer struct {
|
||||
}
|
||||
|
||||
type commonDialect struct {
|
||||
db SQLCommon
|
||||
DefaultForeignKeyNamer
|
||||
}
|
||||
|
||||
func init() {
|
||||
RegisterDialect("common", &commonDialect{})
|
||||
}
|
||||
|
||||
func (commonDialect) GetName() string {
|
||||
return "common"
|
||||
}
|
||||
|
||||
func (s *commonDialect) SetDB(db SQLCommon) {
|
||||
s.db = db
|
||||
}
|
||||
|
||||
func (commonDialect) BindVar(i int) string {
|
||||
return "$$$" // ?
|
||||
}
|
||||
|
||||
func (commonDialect) Quote(key string) string {
|
||||
return fmt.Sprintf(`"%s"`, key)
|
||||
}
|
||||
|
||||
func (s *commonDialect) fieldCanAutoIncrement(field *StructField) bool {
|
||||
if value, ok := field.TagSettingsGet("AUTO_INCREMENT"); ok {
|
||||
return strings.ToLower(value) != "false"
|
||||
}
|
||||
return field.IsPrimaryKey
|
||||
}
|
||||
|
||||
func (s *commonDialect) DataTypeOf(field *StructField) string {
|
||||
var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s)
|
||||
|
||||
if sqlType == "" {
|
||||
switch dataValue.Kind() {
|
||||
case reflect.Bool:
|
||||
sqlType = "BOOLEAN"
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr:
|
||||
if s.fieldCanAutoIncrement(field) {
|
||||
sqlType = "INTEGER AUTO_INCREMENT"
|
||||
} else {
|
||||
sqlType = "INTEGER"
|
||||
}
|
||||
case reflect.Int64, reflect.Uint64:
|
||||
if s.fieldCanAutoIncrement(field) {
|
||||
sqlType = "BIGINT AUTO_INCREMENT"
|
||||
} else {
|
||||
sqlType = "BIGINT"
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
sqlType = "FLOAT"
|
||||
case reflect.String:
|
||||
if size > 0 && size < 65532 {
|
||||
sqlType = fmt.Sprintf("VARCHAR(%d)", size)
|
||||
} else {
|
||||
sqlType = "VARCHAR(65532)"
|
||||
}
|
||||
case reflect.Struct:
|
||||
if _, ok := dataValue.Interface().(time.Time); ok {
|
||||
sqlType = "TIMESTAMP"
|
||||
}
|
||||
default:
|
||||
if _, ok := dataValue.Interface().([]byte); ok {
|
||||
if size > 0 && size < 65532 {
|
||||
sqlType = fmt.Sprintf("BINARY(%d)", size)
|
||||
} else {
|
||||
sqlType = "BINARY(65532)"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if sqlType == "" {
|
||||
panic(fmt.Sprintf("invalid sql type %s (%s) for commonDialect", dataValue.Type().Name(), dataValue.Kind().String()))
|
||||
}
|
||||
|
||||
if strings.TrimSpace(additionalType) == "" {
|
||||
return sqlType
|
||||
}
|
||||
return fmt.Sprintf("%v %v", sqlType, additionalType)
|
||||
}
|
||||
|
||||
func (s commonDialect) HasIndex(tableName string, indexName string) bool {
|
||||
var count int
|
||||
currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
|
||||
s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.STATISTICS WHERE table_schema = ? AND table_name = ? AND index_name = ?", currentDatabase, tableName, indexName).Scan(&count)
|
||||
return count > 0
|
||||
}
|
||||
|
||||
func (s commonDialect) RemoveIndex(tableName string, indexName string) error {
|
||||
_, err := s.db.Exec(fmt.Sprintf("DROP INDEX %v", indexName))
|
||||
return err
|
||||
}
|
||||
|
||||
func (s commonDialect) HasForeignKey(tableName string, foreignKeyName string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (s commonDialect) HasTable(tableName string) bool {
|
||||
var count int
|
||||
currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
|
||||
s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = ? AND table_name = ?", currentDatabase, tableName).Scan(&count)
|
||||
return count > 0
|
||||
}
|
||||
|
||||
func (s commonDialect) HasColumn(tableName string, columnName string) bool {
|
||||
var count int
|
||||
currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
|
||||
s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = ? AND table_name = ? AND column_name = ?", currentDatabase, tableName, columnName).Scan(&count)
|
||||
return count > 0
|
||||
}
|
||||
|
||||
func (s commonDialect) ModifyColumn(tableName string, columnName string, typ string) error {
|
||||
_, err := s.db.Exec(fmt.Sprintf("ALTER TABLE %v ALTER COLUMN %v TYPE %v", tableName, columnName, typ))
|
||||
return err
|
||||
}
|
||||
|
||||
func (s commonDialect) CurrentDatabase() (name string) {
|
||||
s.db.QueryRow("SELECT DATABASE()").Scan(&name)
|
||||
return
|
||||
}
|
||||
|
||||
// LimitAndOffsetSQL return generated SQL with Limit and Offset
|
||||
func (s commonDialect) LimitAndOffsetSQL(limit, offset interface{}) (sql string, err error) {
|
||||
if limit != nil {
|
||||
if parsedLimit, err := s.parseInt(limit); err != nil {
|
||||
return "", err
|
||||
} else if parsedLimit >= 0 {
|
||||
sql += fmt.Sprintf(" LIMIT %d", parsedLimit)
|
||||
}
|
||||
}
|
||||
if offset != nil {
|
||||
if parsedOffset, err := s.parseInt(offset); err != nil {
|
||||
return "", err
|
||||
} else if parsedOffset >= 0 {
|
||||
sql += fmt.Sprintf(" OFFSET %d", parsedOffset)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (commonDialect) SelectFromDummyTable() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (commonDialect) LastInsertIDOutputInterstitial(tableName, columnName string, columns []string) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (commonDialect) LastInsertIDReturningSuffix(tableName, columnName string) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (commonDialect) DefaultValueStr() string {
|
||||
return "DEFAULT VALUES"
|
||||
}
|
||||
|
||||
// BuildKeyName returns a valid key name (foreign key, index key) for the given table, field and reference
|
||||
func (DefaultForeignKeyNamer) BuildKeyName(kind, tableName string, fields ...string) string {
|
||||
keyName := fmt.Sprintf("%s_%s_%s", kind, tableName, strings.Join(fields, "_"))
|
||||
keyName = keyNameRegex.ReplaceAllString(keyName, "_")
|
||||
return keyName
|
||||
}
|
||||
|
||||
// NormalizeIndexAndColumn returns argument's index name and column name without doing anything
|
||||
func (commonDialect) NormalizeIndexAndColumn(indexName, columnName string) (string, string) {
|
||||
return indexName, columnName
|
||||
}
|
||||
|
||||
func (commonDialect) parseInt(value interface{}) (int64, error) {
|
||||
return strconv.ParseInt(fmt.Sprint(value), 0, 0)
|
||||
}
|
||||
|
||||
// IsByteArrayOrSlice returns true of the reflected value is an array or slice
|
||||
func IsByteArrayOrSlice(value reflect.Value) bool {
|
||||
return (value.Kind() == reflect.Array || value.Kind() == reflect.Slice) && value.Type().Elem() == reflect.TypeOf(uint8(0))
|
||||
}
|
246 vendor/github.com/jinzhu/gorm/dialect_mysql.go generated vendored Normal file
@ -0,0 +1,246 @@
|
||||
package gorm
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var mysqlIndexRegex = regexp.MustCompile(`^(.+)\((\d+)\)$`)
|
||||
|
||||
type mysql struct {
|
||||
commonDialect
|
||||
}
|
||||
|
||||
func init() {
|
||||
RegisterDialect("mysql", &mysql{})
|
||||
}
|
||||
|
||||
func (mysql) GetName() string {
|
||||
return "mysql"
|
||||
}
|
||||
|
||||
func (mysql) Quote(key string) string {
|
||||
return fmt.Sprintf("`%s`", key)
|
||||
}
|
||||
|
||||
// Get Data Type for MySQL Dialect
|
||||
func (s *mysql) DataTypeOf(field *StructField) string {
|
||||
var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s)
|
||||
|
||||
// MySQL allows only one auto increment column per table, and it must
|
||||
// be a KEY column.
|
||||
if _, ok := field.TagSettingsGet("AUTO_INCREMENT"); ok {
|
||||
if _, ok = field.TagSettingsGet("INDEX"); !ok && !field.IsPrimaryKey {
|
||||
field.TagSettingsDelete("AUTO_INCREMENT")
|
||||
}
|
||||
}
|
||||
|
||||
if sqlType == "" {
|
||||
switch dataValue.Kind() {
|
||||
case reflect.Bool:
|
||||
sqlType = "boolean"
|
||||
case reflect.Int8:
|
||||
if s.fieldCanAutoIncrement(field) {
|
||||
field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
|
||||
sqlType = "tinyint AUTO_INCREMENT"
|
||||
} else {
|
||||
sqlType = "tinyint"
|
||||
}
|
||||
case reflect.Int, reflect.Int16, reflect.Int32:
|
||||
if s.fieldCanAutoIncrement(field) {
|
||||
field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
|
||||
sqlType = "int AUTO_INCREMENT"
|
||||
} else {
|
||||
sqlType = "int"
|
||||
}
|
||||
case reflect.Uint8:
|
||||
if s.fieldCanAutoIncrement(field) {
|
||||
field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
|
||||
sqlType = "tinyint unsigned AUTO_INCREMENT"
|
||||
} else {
|
||||
sqlType = "tinyint unsigned"
|
||||
}
|
||||
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uintptr:
|
||||
if s.fieldCanAutoIncrement(field) {
|
||||
field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
|
||||
sqlType = "int unsigned AUTO_INCREMENT"
|
||||
} else {
|
||||
sqlType = "int unsigned"
|
||||
}
|
||||
case reflect.Int64:
|
||||
if s.fieldCanAutoIncrement(field) {
|
||||
field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
|
||||
sqlType = "bigint AUTO_INCREMENT"
|
||||
} else {
|
||||
sqlType = "bigint"
|
||||
}
|
||||
case reflect.Uint64:
|
||||
if s.fieldCanAutoIncrement(field) {
|
||||
field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
|
||||
sqlType = "bigint unsigned AUTO_INCREMENT"
|
||||
} else {
|
||||
sqlType = "bigint unsigned"
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
sqlType = "double"
|
||||
case reflect.String:
|
||||
if size > 0 && size < 65532 {
|
||||
sqlType = fmt.Sprintf("varchar(%d)", size)
|
||||
} else {
|
||||
sqlType = "longtext"
|
||||
}
|
||||
case reflect.Struct:
|
||||
if _, ok := dataValue.Interface().(time.Time); ok {
|
||||
precision := ""
|
||||
if p, ok := field.TagSettingsGet("PRECISION"); ok {
|
||||
precision = fmt.Sprintf("(%s)", p)
|
||||
}
|
||||
|
||||
if _, ok := field.TagSettings["NOT NULL"]; ok || field.IsPrimaryKey {
|
||||
sqlType = fmt.Sprintf("DATETIME%v", precision)
|
||||
} else {
|
||||
sqlType = fmt.Sprintf("DATETIME%v NULL", precision)
|
||||
}
|
||||
}
|
||||
default:
|
||||
if IsByteArrayOrSlice(dataValue) {
|
||||
if size > 0 && size < 65532 {
|
||||
sqlType = fmt.Sprintf("varbinary(%d)", size)
|
||||
} else {
|
||||
sqlType = "longblob"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if sqlType == "" {
|
||||
panic(fmt.Sprintf("invalid sql type %s (%s) in field %s for mysql", dataValue.Type().Name(), dataValue.Kind().String(), field.Name))
|
||||
}
|
||||
|
||||
if strings.TrimSpace(additionalType) == "" {
|
||||
return sqlType
|
||||
}
|
||||
return fmt.Sprintf("%v %v", sqlType, additionalType)
|
||||
}
|
||||
|
||||
func (s mysql) RemoveIndex(tableName string, indexName string) error {
|
||||
_, err := s.db.Exec(fmt.Sprintf("DROP INDEX %v ON %v", indexName, s.Quote(tableName)))
|
||||
return err
|
||||
}
|
||||
|
||||
func (s mysql) ModifyColumn(tableName string, columnName string, typ string) error {
|
||||
_, err := s.db.Exec(fmt.Sprintf("ALTER TABLE %v MODIFY COLUMN %v %v", tableName, columnName, typ))
|
||||
return err
|
||||
}
|
||||
|
||||
func (s mysql) LimitAndOffsetSQL(limit, offset interface{}) (sql string, err error) {
|
||||
if limit != nil {
|
||||
parsedLimit, err := s.parseInt(limit)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if parsedLimit >= 0 {
|
||||
sql += fmt.Sprintf(" LIMIT %d", parsedLimit)
|
||||
|
||||
if offset != nil {
|
||||
parsedOffset, err := s.parseInt(offset)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if parsedOffset >= 0 {
|
||||
sql += fmt.Sprintf(" OFFSET %d", parsedOffset)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s mysql) HasForeignKey(tableName string, foreignKeyName string) bool {
|
||||
var count int
|
||||
currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
|
||||
s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_SCHEMA=? AND TABLE_NAME=? AND CONSTRAINT_NAME=? AND CONSTRAINT_TYPE='FOREIGN KEY'", currentDatabase, tableName, foreignKeyName).Scan(&count)
|
||||
return count > 0
|
||||
}
|
||||
|
||||
func (s mysql) HasTable(tableName string) bool {
|
||||
currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
|
||||
var name string
|
||||
// allow mysql database name with '-' character
|
||||
if err := s.db.QueryRow(fmt.Sprintf("SHOW TABLES FROM `%s` WHERE `Tables_in_%s` = ?", currentDatabase, currentDatabase), tableName).Scan(&name); err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return false
|
||||
}
|
||||
panic(err)
|
||||
} else {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func (s mysql) HasIndex(tableName string, indexName string) bool {
|
||||
currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
|
||||
if rows, err := s.db.Query(fmt.Sprintf("SHOW INDEXES FROM `%s` FROM `%s` WHERE Key_name = ?", tableName, currentDatabase), indexName); err != nil {
|
||||
panic(err)
|
||||
} else {
|
||||
defer rows.Close()
|
||||
return rows.Next()
|
||||
}
|
||||
}
|
||||
|
||||
func (s mysql) HasColumn(tableName string, columnName string) bool {
|
||||
currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
|
||||
if rows, err := s.db.Query(fmt.Sprintf("SHOW COLUMNS FROM `%s` FROM `%s` WHERE Field = ?", tableName, currentDatabase), columnName); err != nil {
|
||||
panic(err)
|
||||
} else {
|
||||
defer rows.Close()
|
||||
return rows.Next()
|
||||
}
|
||||
}
|
||||
|
||||
func (s mysql) CurrentDatabase() (name string) {
|
||||
s.db.QueryRow("SELECT DATABASE()").Scan(&name)
|
||||
return
|
||||
}
|
||||
|
||||
func (mysql) SelectFromDummyTable() string {
|
||||
return "FROM DUAL"
|
||||
}
|
||||
|
||||
func (s mysql) BuildKeyName(kind, tableName string, fields ...string) string {
|
||||
keyName := s.commonDialect.BuildKeyName(kind, tableName, fields...)
|
||||
if utf8.RuneCountInString(keyName) <= 64 {
|
||||
return keyName
|
||||
}
|
||||
h := sha1.New()
|
||||
h.Write([]byte(keyName))
|
||||
bs := h.Sum(nil)
|
||||
|
||||
// sha1 is 40 characters, keep first 24 characters of destination
|
||||
destRunes := []rune(keyNameRegex.ReplaceAllString(fields[0], "_"))
|
||||
if len(destRunes) > 24 {
|
||||
destRunes = destRunes[:24]
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s%x", string(destRunes), bs)
|
||||
}
|
||||
|
||||
// NormalizeIndexAndColumn returns index name and column name for specify an index prefix length if needed
|
||||
func (mysql) NormalizeIndexAndColumn(indexName, columnName string) (string, string) {
|
||||
submatch := mysqlIndexRegex.FindStringSubmatch(indexName)
|
||||
if len(submatch) != 3 {
|
||||
return indexName, columnName
|
||||
}
|
||||
indexName = submatch[1]
|
||||
columnName = fmt.Sprintf("%s(%s)", columnName, submatch[2])
|
||||
return indexName, columnName
|
||||
}
|
||||
|
||||
func (mysql) DefaultValueStr() string {
|
||||
return "VALUES()"
|
||||
}
|
147 vendor/github.com/jinzhu/gorm/dialect_postgres.go generated vendored Normal file
@ -0,0 +1,147 @@
|
||||
package gorm
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type postgres struct {
|
||||
commonDialect
|
||||
}
|
||||
|
||||
func init() {
|
||||
RegisterDialect("postgres", &postgres{})
|
||||
RegisterDialect("cloudsqlpostgres", &postgres{})
|
||||
}
|
||||
|
||||
func (postgres) GetName() string {
|
	return "postgres"
}

func (postgres) BindVar(i int) string {
	return fmt.Sprintf("$%v", i)
}

func (s *postgres) DataTypeOf(field *StructField) string {
	var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s)

	if sqlType == "" {
		switch dataValue.Kind() {
		case reflect.Bool:
			sqlType = "boolean"
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uintptr:
			if s.fieldCanAutoIncrement(field) {
				field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
				sqlType = "serial"
			} else {
				sqlType = "integer"
			}
		case reflect.Int64, reflect.Uint32, reflect.Uint64:
			if s.fieldCanAutoIncrement(field) {
				field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
				sqlType = "bigserial"
			} else {
				sqlType = "bigint"
			}
		case reflect.Float32, reflect.Float64:
			sqlType = "numeric"
		case reflect.String:
			if _, ok := field.TagSettingsGet("SIZE"); !ok {
				size = 0 // if SIZE haven't been set, use `text` as the default type, as there are no performance different
			}

			if size > 0 && size < 65532 {
				sqlType = fmt.Sprintf("varchar(%d)", size)
			} else {
				sqlType = "text"
			}
		case reflect.Struct:
			if _, ok := dataValue.Interface().(time.Time); ok {
				sqlType = "timestamp with time zone"
			}
		case reflect.Map:
			if dataValue.Type().Name() == "Hstore" {
				sqlType = "hstore"
			}
		default:
			if IsByteArrayOrSlice(dataValue) {
				sqlType = "bytea"

				if isUUID(dataValue) {
					sqlType = "uuid"
				}

				if isJSON(dataValue) {
					sqlType = "jsonb"
				}
			}
		}
	}

	if sqlType == "" {
		panic(fmt.Sprintf("invalid sql type %s (%s) for postgres", dataValue.Type().Name(), dataValue.Kind().String()))
	}

	if strings.TrimSpace(additionalType) == "" {
		return sqlType
	}
	return fmt.Sprintf("%v %v", sqlType, additionalType)
}

func (s postgres) HasIndex(tableName string, indexName string) bool {
	var count int
	s.db.QueryRow("SELECT count(*) FROM pg_indexes WHERE tablename = $1 AND indexname = $2 AND schemaname = CURRENT_SCHEMA()", tableName, indexName).Scan(&count)
	return count > 0
}

func (s postgres) HasForeignKey(tableName string, foreignKeyName string) bool {
	var count int
	s.db.QueryRow("SELECT count(con.conname) FROM pg_constraint con WHERE $1::regclass::oid = con.conrelid AND con.conname = $2 AND con.contype='f'", tableName, foreignKeyName).Scan(&count)
	return count > 0
}

func (s postgres) HasTable(tableName string) bool {
	var count int
	s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.tables WHERE table_name = $1 AND table_type = 'BASE TABLE' AND table_schema = CURRENT_SCHEMA()", tableName).Scan(&count)
	return count > 0
}

func (s postgres) HasColumn(tableName string, columnName string) bool {
	var count int
	s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.columns WHERE table_name = $1 AND column_name = $2 AND table_schema = CURRENT_SCHEMA()", tableName, columnName).Scan(&count)
	return count > 0
}

func (s postgres) CurrentDatabase() (name string) {
	s.db.QueryRow("SELECT CURRENT_DATABASE()").Scan(&name)
	return
}

func (s postgres) LastInsertIDOutputInterstitial(tableName, key string, columns []string) string {
	return ""
}

func (s postgres) LastInsertIDReturningSuffix(tableName, key string) string {
	return fmt.Sprintf("RETURNING %v.%v", tableName, key)
}

func (postgres) SupportLastInsertID() bool {
	return false
}

func isUUID(value reflect.Value) bool {
	if value.Kind() != reflect.Array || value.Type().Len() != 16 {
		return false
	}
	typename := value.Type().Name()
	lower := strings.ToLower(typename)
	return "uuid" == lower || "guid" == lower
}

func isJSON(value reflect.Value) bool {
	_, ok := value.Interface().(json.RawMessage)
	return ok
}
107 vendor/github.com/jinzhu/gorm/dialect_sqlite3.go (generated, vendored, new file)
@@ -0,0 +1,107 @@
package gorm

import (
	"fmt"
	"reflect"
	"strings"
	"time"
)

type sqlite3 struct {
	commonDialect
}

func init() {
	RegisterDialect("sqlite3", &sqlite3{})
}

func (sqlite3) GetName() string {
	return "sqlite3"
}

// Get Data Type for Sqlite Dialect
func (s *sqlite3) DataTypeOf(field *StructField) string {
	var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s)

	if sqlType == "" {
		switch dataValue.Kind() {
		case reflect.Bool:
			sqlType = "bool"
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr:
			if s.fieldCanAutoIncrement(field) {
				field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
				sqlType = "integer primary key autoincrement"
			} else {
				sqlType = "integer"
			}
		case reflect.Int64, reflect.Uint64:
			if s.fieldCanAutoIncrement(field) {
				field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
				sqlType = "integer primary key autoincrement"
			} else {
				sqlType = "bigint"
			}
		case reflect.Float32, reflect.Float64:
			sqlType = "real"
		case reflect.String:
			if size > 0 && size < 65532 {
				sqlType = fmt.Sprintf("varchar(%d)", size)
			} else {
				sqlType = "text"
			}
		case reflect.Struct:
			if _, ok := dataValue.Interface().(time.Time); ok {
				sqlType = "datetime"
			}
		default:
			if IsByteArrayOrSlice(dataValue) {
				sqlType = "blob"
			}
		}
	}

	if sqlType == "" {
		panic(fmt.Sprintf("invalid sql type %s (%s) for sqlite3", dataValue.Type().Name(), dataValue.Kind().String()))
	}

	if strings.TrimSpace(additionalType) == "" {
		return sqlType
	}
	return fmt.Sprintf("%v %v", sqlType, additionalType)
}

func (s sqlite3) HasIndex(tableName string, indexName string) bool {
	var count int
	s.db.QueryRow(fmt.Sprintf("SELECT count(*) FROM sqlite_master WHERE tbl_name = ? AND sql LIKE '%%INDEX %v ON%%'", indexName), tableName).Scan(&count)
	return count > 0
}

func (s sqlite3) HasTable(tableName string) bool {
	var count int
	s.db.QueryRow("SELECT count(*) FROM sqlite_master WHERE type='table' AND name=?", tableName).Scan(&count)
	return count > 0
}

func (s sqlite3) HasColumn(tableName string, columnName string) bool {
	var count int
	s.db.QueryRow(fmt.Sprintf("SELECT count(*) FROM sqlite_master WHERE tbl_name = ? AND (sql LIKE '%%\"%v\" %%' OR sql LIKE '%%%v %%');\n", columnName, columnName), tableName).Scan(&count)
	return count > 0
}

func (s sqlite3) CurrentDatabase() (name string) {
	var (
		ifaces   = make([]interface{}, 3)
		pointers = make([]*string, 3)
		i        int
	)
	for i = 0; i < 3; i++ {
		ifaces[i] = &pointers[i]
	}
	if err := s.db.QueryRow("PRAGMA database_list").Scan(ifaces...); err != nil {
		return
	}
	if pointers[1] != nil {
		name = *pointers[1]
	}
	return
}
30 vendor/github.com/jinzhu/gorm/docker-compose.yml (generated, vendored, new file)
@@ -0,0 +1,30 @@
version: '3'

services:
  mysql:
    image: 'mysql:latest'
    ports:
      - 9910:3306
    environment:
      - MYSQL_DATABASE=gorm
      - MYSQL_USER=gorm
      - MYSQL_PASSWORD=gorm
      - MYSQL_RANDOM_ROOT_PASSWORD="yes"
  postgres:
    image: 'postgres:latest'
    ports:
      - 9920:5432
    environment:
      - POSTGRES_USER=gorm
      - POSTGRES_DB=gorm
      - POSTGRES_PASSWORD=gorm
  mssql:
    image: 'mcmoe/mssqldocker:latest'
    ports:
      - 9930:1433
    environment:
      - ACCEPT_EULA=Y
      - SA_PASSWORD=LoremIpsum86
      - MSSQL_DB=gorm
      - MSSQL_USER=gorm
      - MSSQL_PASSWORD=LoremIpsum86
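For reference, a minimal sketch of connecting gorm to the postgres service defined in the compose file above. The host, port 9920, and the gorm/gorm credentials come from that file; everything else (local host address, sslmode) is an assumption for illustration, not part of the vendored code.

```go
package main

import (
	"log"

	"github.com/jinzhu/gorm"
	_ "github.com/jinzhu/gorm/dialects/postgres"
)

func main() {
	// 127.0.0.1:9920 and the gorm/gorm/gorm credentials mirror docker-compose.yml above.
	db, err := gorm.Open("postgres",
		"host=127.0.0.1 port=9920 user=gorm dbname=gorm password=gorm sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```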
72 vendor/github.com/jinzhu/gorm/errors.go (generated, vendored, new file)
@@ -0,0 +1,72 @@
package gorm

import (
	"errors"
	"strings"
)

var (
	// ErrRecordNotFound returns a "record not found error". Occurs only when attempting to query the database with a struct; querying with a slice won't return this error
	ErrRecordNotFound = errors.New("record not found")
	// ErrInvalidSQL occurs when you attempt a query with invalid SQL
	ErrInvalidSQL = errors.New("invalid SQL")
	// ErrInvalidTransaction occurs when you are trying to `Commit` or `Rollback`
	ErrInvalidTransaction = errors.New("no valid transaction")
	// ErrCantStartTransaction can't start transaction when you are trying to start one with `Begin`
	ErrCantStartTransaction = errors.New("can't start transaction")
	// ErrUnaddressable unaddressable value
	ErrUnaddressable = errors.New("using unaddressable value")
)

// Errors contains all happened errors
type Errors []error

// IsRecordNotFoundError returns true if error contains a RecordNotFound error
func IsRecordNotFoundError(err error) bool {
	if errs, ok := err.(Errors); ok {
		for _, err := range errs {
			if err == ErrRecordNotFound {
				return true
			}
		}
	}
	return err == ErrRecordNotFound
}

// GetErrors gets all errors that have occurred and returns a slice of errors (Error type)
func (errs Errors) GetErrors() []error {
	return errs
}

// Add adds an error to a given slice of errors
func (errs Errors) Add(newErrors ...error) Errors {
	for _, err := range newErrors {
		if err == nil {
			continue
		}

		if errors, ok := err.(Errors); ok {
			errs = errs.Add(errors...)
		} else {
			ok = true
			for _, e := range errs {
				if err == e {
					ok = false
				}
			}
			if ok {
				errs = append(errs, err)
			}
		}
	}
	return errs
}

// Error takes a slice of all errors that have occurred and returns it as a formatted string
func (errs Errors) Error() string {
	var errors = []string{}
	for _, e := range errs {
		errors = append(errors, e.Error())
	}
	return strings.Join(errors, "; ")
}
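A minimal sketch of how the helpers in errors.go are typically consumed. The `User` model and the open `*gorm.DB` handle are assumptions used only for illustration; only `gorm.IsRecordNotFoundError` comes from the file above.

```go
package main

import "github.com/jinzhu/gorm"

// User is a hypothetical model used only to illustrate IsRecordNotFoundError.
type User struct {
	gorm.Model
	Name string
}

// findUser treats "no rows" as a normal outcome and every other error as a real failure.
func findUser(db *gorm.DB, name string) (*User, error) {
	var u User
	if err := db.Where("name = ?", name).First(&u).Error; err != nil {
		if gorm.IsRecordNotFoundError(err) {
			return nil, nil // not found, but not a failure
		}
		return nil, err
	}
	return &u, nil
}
```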
66 vendor/github.com/jinzhu/gorm/field.go (generated, vendored, new file)
@@ -0,0 +1,66 @@
package gorm

import (
	"database/sql"
	"database/sql/driver"
	"errors"
	"fmt"
	"reflect"
)

// Field model field definition
type Field struct {
	*StructField
	IsBlank bool
	Field   reflect.Value
}

// Set set a value to the field
func (field *Field) Set(value interface{}) (err error) {
	if !field.Field.IsValid() {
		return errors.New("field value not valid")
	}

	if !field.Field.CanAddr() {
		return ErrUnaddressable
	}

	reflectValue, ok := value.(reflect.Value)
	if !ok {
		reflectValue = reflect.ValueOf(value)
	}

	fieldValue := field.Field
	if reflectValue.IsValid() {
		if reflectValue.Type().ConvertibleTo(fieldValue.Type()) {
			fieldValue.Set(reflectValue.Convert(fieldValue.Type()))
		} else {
			if fieldValue.Kind() == reflect.Ptr {
				if fieldValue.IsNil() {
					fieldValue.Set(reflect.New(field.Struct.Type.Elem()))
				}
				fieldValue = fieldValue.Elem()
			}

			if reflectValue.Type().ConvertibleTo(fieldValue.Type()) {
				fieldValue.Set(reflectValue.Convert(fieldValue.Type()))
			} else if scanner, ok := fieldValue.Addr().Interface().(sql.Scanner); ok {
				v := reflectValue.Interface()
				if valuer, ok := v.(driver.Valuer); ok {
					if v, err = valuer.Value(); err == nil {
						err = scanner.Scan(v)
					}
				} else {
					err = scanner.Scan(v)
				}
			} else {
				err = fmt.Errorf("could not convert argument of field %s from %s to %s", field.Name, reflectValue.Type(), fieldValue.Type())
			}
		}
	} else {
		field.Field.Set(reflect.Zero(field.Field.Type()))
	}

	field.IsBlank = isBlank(field.Field)
	return err
}
15 vendor/github.com/jinzhu/gorm/go.mod (generated, vendored, new file)
@@ -0,0 +1,15 @@
module github.com/jinzhu/gorm

go 1.12

require (
	github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd
	github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5
	github.com/go-sql-driver/mysql v1.4.1
	github.com/jinzhu/inflection v1.0.0
	github.com/jinzhu/now v1.0.1
	github.com/lib/pq v1.1.1
	github.com/mattn/go-sqlite3 v2.0.1+incompatible
	golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd // indirect
	google.golang.org/appengine v1.4.0 // indirect
)
29 vendor/github.com/jinzhu/gorm/go.sum (generated, vendored, new file)
@@ -0,0 +1,29 @@
github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd h1:83Wprp6ROGeiHFAP8WJdI2RoxALQYgdllERc3N5N2DM=
github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.0.1 h1:HjfetcXq097iXP0uoPCdnM4Efp5/9MsM0/M+XOTeR3M=
github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/lib/pq v1.1.1 h1:sJZmqHoEaY7f+NPP8pgLB/WxulyR3fewgCM2qaSlBb4=
github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/mattn/go-sqlite3 v2.0.1+incompatible h1:xQ15muvnzGBHpIpdrNi1DA5x0+TcBZzsIDwmw9uTHzw=
github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c h1:Vj5n4GlwjmQteupaxJ9+0FNOmBrHfq7vN4btdGoDZgI=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd h1:GGJVjV8waZKRHrgwvtH66z9ZGVurTD1MT0n1Bb+q4aM=
golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
24 vendor/github.com/jinzhu/gorm/interface.go (generated, vendored, new file)
@@ -0,0 +1,24 @@
package gorm

import (
	"context"
	"database/sql"
)

// SQLCommon is the minimal database connection functionality gorm requires. Implemented by *sql.DB.
type SQLCommon interface {
	Exec(query string, args ...interface{}) (sql.Result, error)
	Prepare(query string) (*sql.Stmt, error)
	Query(query string, args ...interface{}) (*sql.Rows, error)
	QueryRow(query string, args ...interface{}) *sql.Row
}

type sqlDb interface {
	Begin() (*sql.Tx, error)
	BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error)
}

type sqlTx interface {
	Commit() error
	Rollback() error
}
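Because `*sql.DB` satisfies `SQLCommon`, an already-open standard-library connection can be handed to `gorm.Open` instead of a DSN string. A minimal sketch; the DSN reuses the compose-file credentials above and is an assumption for illustration.

```go
package main

import (
	"database/sql"
	"log"

	"github.com/jinzhu/gorm"
	_ "github.com/lib/pq"
)

func main() {
	// Open the connection with database/sql first, then wrap it with gorm.
	sqlDB, err := sql.Open("postgres",
		"host=127.0.0.1 port=9920 user=gorm dbname=gorm password=gorm sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}

	// *sql.DB implements SQLCommon, so it is accepted directly by gorm.Open.
	db, err := gorm.Open("postgres", sqlDB)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```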
211 vendor/github.com/jinzhu/gorm/join_table_handler.go (generated, vendored, new file)
@@ -0,0 +1,211 @@
|
||||
package gorm
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// JoinTableHandlerInterface is an interface for how to handle many2many relations
|
||||
type JoinTableHandlerInterface interface {
|
||||
// initialize join table handler
|
||||
Setup(relationship *Relationship, tableName string, source reflect.Type, destination reflect.Type)
|
||||
// Table return join table's table name
|
||||
Table(db *DB) string
|
||||
// Add create relationship in join table for source and destination
|
||||
Add(handler JoinTableHandlerInterface, db *DB, source interface{}, destination interface{}) error
|
||||
// Delete delete relationship in join table for sources
|
||||
Delete(handler JoinTableHandlerInterface, db *DB, sources ...interface{}) error
|
||||
// JoinWith query with `Join` conditions
|
||||
JoinWith(handler JoinTableHandlerInterface, db *DB, source interface{}) *DB
|
||||
// SourceForeignKeys return source foreign keys
|
||||
SourceForeignKeys() []JoinTableForeignKey
|
||||
// DestinationForeignKeys return destination foreign keys
|
||||
DestinationForeignKeys() []JoinTableForeignKey
|
||||
}
|
||||
|
||||
// JoinTableForeignKey join table foreign key struct
|
||||
type JoinTableForeignKey struct {
|
||||
DBName string
|
||||
AssociationDBName string
|
||||
}
|
||||
|
||||
// JoinTableSource is a struct that contains model type and foreign keys
|
||||
type JoinTableSource struct {
|
||||
ModelType reflect.Type
|
||||
ForeignKeys []JoinTableForeignKey
|
||||
}
|
||||
|
||||
// JoinTableHandler default join table handler
|
||||
type JoinTableHandler struct {
|
||||
TableName string `sql:"-"`
|
||||
Source JoinTableSource `sql:"-"`
|
||||
Destination JoinTableSource `sql:"-"`
|
||||
}
|
||||
|
||||
// SourceForeignKeys return source foreign keys
|
||||
func (s *JoinTableHandler) SourceForeignKeys() []JoinTableForeignKey {
|
||||
return s.Source.ForeignKeys
|
||||
}
|
||||
|
||||
// DestinationForeignKeys return destination foreign keys
|
||||
func (s *JoinTableHandler) DestinationForeignKeys() []JoinTableForeignKey {
|
||||
return s.Destination.ForeignKeys
|
||||
}
|
||||
|
||||
// Setup initialize a default join table handler
|
||||
func (s *JoinTableHandler) Setup(relationship *Relationship, tableName string, source reflect.Type, destination reflect.Type) {
|
||||
s.TableName = tableName
|
||||
|
||||
s.Source = JoinTableSource{ModelType: source}
|
||||
s.Source.ForeignKeys = []JoinTableForeignKey{}
|
||||
for idx, dbName := range relationship.ForeignFieldNames {
|
||||
s.Source.ForeignKeys = append(s.Source.ForeignKeys, JoinTableForeignKey{
|
||||
DBName: relationship.ForeignDBNames[idx],
|
||||
AssociationDBName: dbName,
|
||||
})
|
||||
}
|
||||
|
||||
s.Destination = JoinTableSource{ModelType: destination}
|
||||
s.Destination.ForeignKeys = []JoinTableForeignKey{}
|
||||
for idx, dbName := range relationship.AssociationForeignFieldNames {
|
||||
s.Destination.ForeignKeys = append(s.Destination.ForeignKeys, JoinTableForeignKey{
|
||||
DBName: relationship.AssociationForeignDBNames[idx],
|
||||
AssociationDBName: dbName,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Table return join table's table name
|
||||
func (s JoinTableHandler) Table(db *DB) string {
|
||||
return DefaultTableNameHandler(db, s.TableName)
|
||||
}
|
||||
|
||||
func (s JoinTableHandler) updateConditionMap(conditionMap map[string]interface{}, db *DB, joinTableSources []JoinTableSource, sources ...interface{}) {
|
||||
for _, source := range sources {
|
||||
scope := db.NewScope(source)
|
||||
modelType := scope.GetModelStruct().ModelType
|
||||
|
||||
for _, joinTableSource := range joinTableSources {
|
||||
if joinTableSource.ModelType == modelType {
|
||||
for _, foreignKey := range joinTableSource.ForeignKeys {
|
||||
if field, ok := scope.FieldByName(foreignKey.AssociationDBName); ok {
|
||||
conditionMap[foreignKey.DBName] = field.Field.Interface()
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add create relationship in join table for source and destination
|
||||
func (s JoinTableHandler) Add(handler JoinTableHandlerInterface, db *DB, source interface{}, destination interface{}) error {
|
||||
var (
|
||||
scope = db.NewScope("")
|
||||
conditionMap = map[string]interface{}{}
|
||||
)
|
||||
|
||||
// Update condition map for source
|
||||
s.updateConditionMap(conditionMap, db, []JoinTableSource{s.Source}, source)
|
||||
|
||||
// Update condition map for destination
|
||||
s.updateConditionMap(conditionMap, db, []JoinTableSource{s.Destination}, destination)
|
||||
|
||||
var assignColumns, binVars, conditions []string
|
||||
var values []interface{}
|
||||
for key, value := range conditionMap {
|
||||
assignColumns = append(assignColumns, scope.Quote(key))
|
||||
binVars = append(binVars, `?`)
|
||||
conditions = append(conditions, fmt.Sprintf("%v = ?", scope.Quote(key)))
|
||||
values = append(values, value)
|
||||
}
|
||||
|
||||
for _, value := range values {
|
||||
values = append(values, value)
|
||||
}
|
||||
|
||||
quotedTable := scope.Quote(handler.Table(db))
|
||||
sql := fmt.Sprintf(
|
||||
"INSERT INTO %v (%v) SELECT %v %v WHERE NOT EXISTS (SELECT * FROM %v WHERE %v)",
|
||||
quotedTable,
|
||||
strings.Join(assignColumns, ","),
|
||||
strings.Join(binVars, ","),
|
||||
scope.Dialect().SelectFromDummyTable(),
|
||||
quotedTable,
|
||||
strings.Join(conditions, " AND "),
|
||||
)
|
||||
|
||||
return db.Exec(sql, values...).Error
|
||||
}
|
||||
|
||||
// Delete delete relationship in join table for sources
|
||||
func (s JoinTableHandler) Delete(handler JoinTableHandlerInterface, db *DB, sources ...interface{}) error {
|
||||
var (
|
||||
scope = db.NewScope(nil)
|
||||
conditions []string
|
||||
values []interface{}
|
||||
conditionMap = map[string]interface{}{}
|
||||
)
|
||||
|
||||
s.updateConditionMap(conditionMap, db, []JoinTableSource{s.Source, s.Destination}, sources...)
|
||||
|
||||
for key, value := range conditionMap {
|
||||
conditions = append(conditions, fmt.Sprintf("%v = ?", scope.Quote(key)))
|
||||
values = append(values, value)
|
||||
}
|
||||
|
||||
return db.Table(handler.Table(db)).Where(strings.Join(conditions, " AND "), values...).Delete("").Error
|
||||
}
|
||||
|
||||
// JoinWith query with `Join` conditions
|
||||
func (s JoinTableHandler) JoinWith(handler JoinTableHandlerInterface, db *DB, source interface{}) *DB {
|
||||
var (
|
||||
scope = db.NewScope(source)
|
||||
tableName = handler.Table(db)
|
||||
quotedTableName = scope.Quote(tableName)
|
||||
joinConditions []string
|
||||
values []interface{}
|
||||
)
|
||||
|
||||
if s.Source.ModelType == scope.GetModelStruct().ModelType {
|
||||
destinationTableName := db.NewScope(reflect.New(s.Destination.ModelType).Interface()).QuotedTableName()
|
||||
for _, foreignKey := range s.Destination.ForeignKeys {
|
||||
joinConditions = append(joinConditions, fmt.Sprintf("%v.%v = %v.%v", quotedTableName, scope.Quote(foreignKey.DBName), destinationTableName, scope.Quote(foreignKey.AssociationDBName)))
|
||||
}
|
||||
|
||||
var foreignDBNames []string
|
||||
var foreignFieldNames []string
|
||||
|
||||
for _, foreignKey := range s.Source.ForeignKeys {
|
||||
foreignDBNames = append(foreignDBNames, foreignKey.DBName)
|
||||
if field, ok := scope.FieldByName(foreignKey.AssociationDBName); ok {
|
||||
foreignFieldNames = append(foreignFieldNames, field.Name)
|
||||
}
|
||||
}
|
||||
|
||||
foreignFieldValues := scope.getColumnAsArray(foreignFieldNames, scope.Value)
|
||||
|
||||
var condString string
|
||||
if len(foreignFieldValues) > 0 {
|
||||
var quotedForeignDBNames []string
|
||||
for _, dbName := range foreignDBNames {
|
||||
quotedForeignDBNames = append(quotedForeignDBNames, tableName+"."+dbName)
|
||||
}
|
||||
|
||||
condString = fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, quotedForeignDBNames), toQueryMarks(foreignFieldValues))
|
||||
|
||||
keys := scope.getColumnAsArray(foreignFieldNames, scope.Value)
|
||||
values = append(values, toQueryValues(keys))
|
||||
} else {
|
||||
condString = fmt.Sprintf("1 <> 1")
|
||||
}
|
||||
|
||||
return db.Joins(fmt.Sprintf("INNER JOIN %v ON %v", quotedTableName, strings.Join(joinConditions, " AND "))).
|
||||
Where(condString, toQueryValues(foreignFieldValues)...)
|
||||
}
|
||||
|
||||
db.Error = errors.New("wrong source type for join table handler")
|
||||
return db
|
||||
}
|
141 vendor/github.com/jinzhu/gorm/logger.go (generated, vendored, new file)
@@ -0,0 +1,141 @@
|
||||
package gorm
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"time"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultLogger = Logger{log.New(os.Stdout, "\r\n", 0)}
|
||||
sqlRegexp = regexp.MustCompile(`\?`)
|
||||
numericPlaceHolderRegexp = regexp.MustCompile(`\$\d+`)
|
||||
)
|
||||
|
||||
func isPrintable(s string) bool {
|
||||
for _, r := range s {
|
||||
if !unicode.IsPrint(r) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
var LogFormatter = func(values ...interface{}) (messages []interface{}) {
|
||||
if len(values) > 1 {
|
||||
var (
|
||||
sql string
|
||||
formattedValues []string
|
||||
level = values[0]
|
||||
currentTime = "\n\033[33m[" + NowFunc().Format("2006-01-02 15:04:05") + "]\033[0m"
|
||||
source = fmt.Sprintf("\033[35m(%v)\033[0m", values[1])
|
||||
)
|
||||
|
||||
messages = []interface{}{source, currentTime}
|
||||
|
||||
if len(values) == 2 {
|
||||
//remove the line break
|
||||
currentTime = currentTime[1:]
|
||||
//remove the brackets
|
||||
source = fmt.Sprintf("\033[35m%v\033[0m", values[1])
|
||||
|
||||
messages = []interface{}{currentTime, source}
|
||||
}
|
||||
|
||||
if level == "sql" {
|
||||
// duration
|
||||
messages = append(messages, fmt.Sprintf(" \033[36;1m[%.2fms]\033[0m ", float64(values[2].(time.Duration).Nanoseconds()/1e4)/100.0))
|
||||
// sql
|
||||
|
||||
for _, value := range values[4].([]interface{}) {
|
||||
indirectValue := reflect.Indirect(reflect.ValueOf(value))
|
||||
if indirectValue.IsValid() {
|
||||
value = indirectValue.Interface()
|
||||
if t, ok := value.(time.Time); ok {
|
||||
if t.IsZero() {
|
||||
formattedValues = append(formattedValues, fmt.Sprintf("'%v'", "0000-00-00 00:00:00"))
|
||||
} else {
|
||||
formattedValues = append(formattedValues, fmt.Sprintf("'%v'", t.Format("2006-01-02 15:04:05")))
|
||||
}
|
||||
} else if b, ok := value.([]byte); ok {
|
||||
if str := string(b); isPrintable(str) {
|
||||
formattedValues = append(formattedValues, fmt.Sprintf("'%v'", str))
|
||||
} else {
|
||||
formattedValues = append(formattedValues, "'<binary>'")
|
||||
}
|
||||
} else if r, ok := value.(driver.Valuer); ok {
|
||||
if value, err := r.Value(); err == nil && value != nil {
|
||||
formattedValues = append(formattedValues, fmt.Sprintf("'%v'", value))
|
||||
} else {
|
||||
formattedValues = append(formattedValues, "NULL")
|
||||
}
|
||||
} else {
|
||||
switch value.(type) {
|
||||
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool:
|
||||
formattedValues = append(formattedValues, fmt.Sprintf("%v", value))
|
||||
default:
|
||||
formattedValues = append(formattedValues, fmt.Sprintf("'%v'", value))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
formattedValues = append(formattedValues, "NULL")
|
||||
}
|
||||
}
|
||||
|
||||
// differentiate between $n placeholders or else treat like ?
|
||||
if numericPlaceHolderRegexp.MatchString(values[3].(string)) {
|
||||
sql = values[3].(string)
|
||||
for index, value := range formattedValues {
|
||||
placeholder := fmt.Sprintf(`\$%d([^\d]|$)`, index+1)
|
||||
sql = regexp.MustCompile(placeholder).ReplaceAllString(sql, value+"$1")
|
||||
}
|
||||
} else {
|
||||
formattedValuesLength := len(formattedValues)
|
||||
for index, value := range sqlRegexp.Split(values[3].(string), -1) {
|
||||
sql += value
|
||||
if index < formattedValuesLength {
|
||||
sql += formattedValues[index]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
messages = append(messages, sql)
|
||||
messages = append(messages, fmt.Sprintf(" \n\033[36;31m[%v]\033[0m ", strconv.FormatInt(values[5].(int64), 10)+" rows affected or returned "))
|
||||
} else {
|
||||
messages = append(messages, "\033[31;1m")
|
||||
messages = append(messages, values[2:]...)
|
||||
messages = append(messages, "\033[0m")
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
type logger interface {
|
||||
Print(v ...interface{})
|
||||
}
|
||||
|
||||
// LogWriter log writer interface
|
||||
type LogWriter interface {
|
||||
Println(v ...interface{})
|
||||
}
|
||||
|
||||
// Logger default logger
|
||||
type Logger struct {
|
||||
LogWriter
|
||||
}
|
||||
|
||||
// Print format & print log
|
||||
func (logger Logger) Print(values ...interface{}) {
|
||||
logger.Println(LogFormatter(values...)...)
|
||||
}
|
||||
|
||||
type nopLogger struct{}
|
||||
|
||||
func (nopLogger) Print(values ...interface{}) {}
|
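A minimal sketch of plugging a custom writer into the logger machinery defined in logger.go above. The sqlite dialect, in-memory DSN, and log file path are assumptions for illustration; `gorm.Logger`, `SetLogger`, and `LogMode` come from the vendored code.

```go
package main

import (
	"log"
	"os"

	"github.com/jinzhu/gorm"
	_ "github.com/jinzhu/gorm/dialects/sqlite"
)

func main() {
	db, err := gorm.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Any value with a Println method satisfies LogWriter, so the standard logger works.
	f, err := os.Create("gorm.log")
	if err != nil {
		log.Fatal(err)
	}
	db.SetLogger(gorm.Logger{LogWriter: log.New(f, "\r\n", 0)})
	db.LogMode(true) // detailed mode: SQL statements are rendered by LogFormatter above
}
```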
881 vendor/github.com/jinzhu/gorm/main.go (generated, vendored, new file)
@@ -0,0 +1,881 @@
|
||||
package gorm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DB contains information for current db connection
|
||||
type DB struct {
|
||||
sync.RWMutex
|
||||
Value interface{}
|
||||
Error error
|
||||
RowsAffected int64
|
||||
|
||||
// single db
|
||||
db SQLCommon
|
||||
blockGlobalUpdate bool
|
||||
logMode logModeValue
|
||||
logger logger
|
||||
search *search
|
||||
values sync.Map
|
||||
|
||||
// global db
|
||||
parent *DB
|
||||
callbacks *Callback
|
||||
dialect Dialect
|
||||
singularTable bool
|
||||
|
||||
// function to be used to override the creating of a new timestamp
|
||||
nowFuncOverride func() time.Time
|
||||
}
|
||||
|
||||
type logModeValue int
|
||||
|
||||
const (
|
||||
defaultLogMode logModeValue = iota
|
||||
noLogMode
|
||||
detailedLogMode
|
||||
)
|
||||
|
||||
// Open initialize a new db connection, need to import driver first, e.g:
|
||||
//
|
||||
// import _ "github.com/go-sql-driver/mysql"
|
||||
// func main() {
|
||||
// db, err := gorm.Open("mysql", "user:password@/dbname?charset=utf8&parseTime=True&loc=Local")
|
||||
// }
|
||||
// GORM has wrapped some drivers, for easier to remember driver's import path, so you could import the mysql driver with
|
||||
// import _ "github.com/jinzhu/gorm/dialects/mysql"
|
||||
// // import _ "github.com/jinzhu/gorm/dialects/postgres"
|
||||
// // import _ "github.com/jinzhu/gorm/dialects/sqlite"
|
||||
// // import _ "github.com/jinzhu/gorm/dialects/mssql"
|
||||
func Open(dialect string, args ...interface{}) (db *DB, err error) {
|
||||
if len(args) == 0 {
|
||||
err = errors.New("invalid database source")
|
||||
return nil, err
|
||||
}
|
||||
var source string
|
||||
var dbSQL SQLCommon
|
||||
var ownDbSQL bool
|
||||
|
||||
switch value := args[0].(type) {
|
||||
case string:
|
||||
var driver = dialect
|
||||
if len(args) == 1 {
|
||||
source = value
|
||||
} else if len(args) >= 2 {
|
||||
driver = value
|
||||
source = args[1].(string)
|
||||
}
|
||||
dbSQL, err = sql.Open(driver, source)
|
||||
ownDbSQL = true
|
||||
case SQLCommon:
|
||||
dbSQL = value
|
||||
ownDbSQL = false
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid database source: %v is not a valid type", value)
|
||||
}
|
||||
|
||||
db = &DB{
|
||||
db: dbSQL,
|
||||
logger: defaultLogger,
|
||||
callbacks: DefaultCallback,
|
||||
dialect: newDialect(dialect, dbSQL),
|
||||
}
|
||||
db.parent = db
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// Send a ping to make sure the database connection is alive.
|
||||
if d, ok := dbSQL.(*sql.DB); ok {
|
||||
if err = d.Ping(); err != nil && ownDbSQL {
|
||||
d.Close()
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// New clone a new db connection without search conditions
|
||||
func (s *DB) New() *DB {
|
||||
clone := s.clone()
|
||||
clone.search = nil
|
||||
clone.Value = nil
|
||||
return clone
|
||||
}
|
||||
|
||||
type closer interface {
|
||||
Close() error
|
||||
}
|
||||
|
||||
// Close close current db connection. If database connection is not an io.Closer, returns an error.
|
||||
func (s *DB) Close() error {
|
||||
if db, ok := s.parent.db.(closer); ok {
|
||||
return db.Close()
|
||||
}
|
||||
return errors.New("can't close current db")
|
||||
}
|
||||
|
||||
// DB get `*sql.DB` from current connection
|
||||
// If the underlying database connection is not a *sql.DB, returns nil
|
||||
func (s *DB) DB() *sql.DB {
|
||||
db, ok := s.db.(*sql.DB)
|
||||
if !ok {
|
||||
panic("can't support full GORM on currently status, maybe this is a TX instance.")
|
||||
}
|
||||
return db
|
||||
}
|
||||
|
||||
// CommonDB return the underlying `*sql.DB` or `*sql.Tx` instance, mainly intended to allow coexistence with legacy non-GORM code.
|
||||
func (s *DB) CommonDB() SQLCommon {
|
||||
return s.db
|
||||
}
|
||||
|
||||
// Dialect get dialect
|
||||
func (s *DB) Dialect() Dialect {
|
||||
return s.dialect
|
||||
}
|
||||
|
||||
// Callback return `Callbacks` container, you could add/change/delete callbacks with it
|
||||
// db.Callback().Create().Register("update_created_at", updateCreated)
|
||||
// Refer https://jinzhu.github.io/gorm/development.html#callbacks
|
||||
func (s *DB) Callback() *Callback {
|
||||
s.parent.callbacks = s.parent.callbacks.clone(s.logger)
|
||||
return s.parent.callbacks
|
||||
}
|
||||
|
||||
// SetLogger replace default logger
|
||||
func (s *DB) SetLogger(log logger) {
|
||||
s.logger = log
|
||||
}
|
||||
|
||||
// LogMode set log mode, `true` for detailed logs, `false` for no log, default, will only print error logs
|
||||
func (s *DB) LogMode(enable bool) *DB {
|
||||
if enable {
|
||||
s.logMode = detailedLogMode
|
||||
} else {
|
||||
s.logMode = noLogMode
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// SetNowFuncOverride set the function to be used when creating a new timestamp
|
||||
func (s *DB) SetNowFuncOverride(nowFuncOverride func() time.Time) *DB {
|
||||
s.nowFuncOverride = nowFuncOverride
|
||||
return s
|
||||
}
|
||||
|
||||
// Get a new timestamp, using the provided nowFuncOverride on the DB instance if set,
|
||||
// otherwise defaults to the global NowFunc()
|
||||
func (s *DB) nowFunc() time.Time {
|
||||
if s.nowFuncOverride != nil {
|
||||
return s.nowFuncOverride()
|
||||
}
|
||||
|
||||
return NowFunc()
|
||||
}
|
||||
|
||||
// BlockGlobalUpdate if true, generates an error on update/delete without where clause.
|
||||
// This is to prevent eventual error with empty objects updates/deletions
|
||||
func (s *DB) BlockGlobalUpdate(enable bool) *DB {
|
||||
s.blockGlobalUpdate = enable
|
||||
return s
|
||||
}
|
||||
|
||||
// HasBlockGlobalUpdate return state of block
|
||||
func (s *DB) HasBlockGlobalUpdate() bool {
|
||||
return s.blockGlobalUpdate
|
||||
}
|
||||
|
||||
// SingularTable use singular table by default
|
||||
func (s *DB) SingularTable(enable bool) {
|
||||
s.parent.Lock()
|
||||
defer s.parent.Unlock()
|
||||
s.parent.singularTable = enable
|
||||
}
|
||||
|
||||
// NewScope create a scope for current operation
|
||||
func (s *DB) NewScope(value interface{}) *Scope {
|
||||
dbClone := s.clone()
|
||||
dbClone.Value = value
|
||||
scope := &Scope{db: dbClone, Value: value}
|
||||
if s.search != nil {
|
||||
scope.Search = s.search.clone()
|
||||
} else {
|
||||
scope.Search = &search{}
|
||||
}
|
||||
return scope
|
||||
}
|
||||
|
||||
// QueryExpr returns the query as SqlExpr object
|
||||
func (s *DB) QueryExpr() *SqlExpr {
|
||||
scope := s.NewScope(s.Value)
|
||||
scope.InstanceSet("skip_bindvar", true)
|
||||
scope.prepareQuerySQL()
|
||||
|
||||
return Expr(scope.SQL, scope.SQLVars...)
|
||||
}
|
||||
|
||||
// SubQuery returns the query as sub query
|
||||
func (s *DB) SubQuery() *SqlExpr {
|
||||
scope := s.NewScope(s.Value)
|
||||
scope.InstanceSet("skip_bindvar", true)
|
||||
scope.prepareQuerySQL()
|
||||
|
||||
return Expr(fmt.Sprintf("(%v)", scope.SQL), scope.SQLVars...)
|
||||
}
|
||||
|
||||
// Where return a new relation, filter records with given conditions, accepts `map`, `struct` or `string` as conditions, refer http://jinzhu.github.io/gorm/crud.html#query
|
||||
func (s *DB) Where(query interface{}, args ...interface{}) *DB {
|
||||
return s.clone().search.Where(query, args...).db
|
||||
}
|
||||
|
||||
// Or filter records that match before conditions or this one, similar to `Where`
|
||||
func (s *DB) Or(query interface{}, args ...interface{}) *DB {
|
||||
return s.clone().search.Or(query, args...).db
|
||||
}
|
||||
|
||||
// Not filter records that don't match current conditions, similar to `Where`
|
||||
func (s *DB) Not(query interface{}, args ...interface{}) *DB {
|
||||
return s.clone().search.Not(query, args...).db
|
||||
}
|
||||
|
||||
// Limit specify the number of records to be retrieved
|
||||
func (s *DB) Limit(limit interface{}) *DB {
|
||||
return s.clone().search.Limit(limit).db
|
||||
}
|
||||
|
||||
// Offset specify the number of records to skip before starting to return the records
|
||||
func (s *DB) Offset(offset interface{}) *DB {
|
||||
return s.clone().search.Offset(offset).db
|
||||
}
|
||||
|
||||
// Order specify order when retrieve records from database, set reorder to `true` to overwrite defined conditions
|
||||
// db.Order("name DESC")
|
||||
// db.Order("name DESC", true) // reorder
|
||||
// db.Order(gorm.Expr("name = ? DESC", "first")) // sql expression
|
||||
func (s *DB) Order(value interface{}, reorder ...bool) *DB {
|
||||
return s.clone().search.Order(value, reorder...).db
|
||||
}
|
||||
|
||||
// Select specify fields that you want to retrieve from database when querying, by default, will select all fields;
|
||||
// When creating/updating, specify fields that you want to save to database
|
||||
func (s *DB) Select(query interface{}, args ...interface{}) *DB {
|
||||
return s.clone().search.Select(query, args...).db
|
||||
}
|
||||
|
||||
// Omit specify fields that you want to ignore when saving to database for creating, updating
|
||||
func (s *DB) Omit(columns ...string) *DB {
|
||||
return s.clone().search.Omit(columns...).db
|
||||
}
|
||||
|
||||
// Group specify the group method on the find
|
||||
func (s *DB) Group(query string) *DB {
|
||||
return s.clone().search.Group(query).db
|
||||
}
|
||||
|
||||
// Having specify HAVING conditions for GROUP BY
|
||||
func (s *DB) Having(query interface{}, values ...interface{}) *DB {
|
||||
return s.clone().search.Having(query, values...).db
|
||||
}
|
||||
|
||||
// Joins specify Joins conditions
|
||||
// db.Joins("JOIN emails ON emails.user_id = users.id AND emails.email = ?", "jinzhu@example.org").Find(&user)
|
||||
func (s *DB) Joins(query string, args ...interface{}) *DB {
|
||||
return s.clone().search.Joins(query, args...).db
|
||||
}
|
||||
|
||||
// Scopes pass current database connection to arguments `func(*DB) *DB`, which could be used to add conditions dynamically
|
||||
// func AmountGreaterThan1000(db *gorm.DB) *gorm.DB {
|
||||
// return db.Where("amount > ?", 1000)
|
||||
// }
|
||||
//
|
||||
// func OrderStatus(status []string) func (db *gorm.DB) *gorm.DB {
|
||||
// return func (db *gorm.DB) *gorm.DB {
|
||||
// return db.Scopes(AmountGreaterThan1000).Where("status in (?)", status)
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// db.Scopes(AmountGreaterThan1000, OrderStatus([]string{"paid", "shipped"})).Find(&orders)
|
||||
// Refer https://jinzhu.github.io/gorm/crud.html#scopes
|
||||
func (s *DB) Scopes(funcs ...func(*DB) *DB) *DB {
|
||||
for _, f := range funcs {
|
||||
s = f(s)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Unscoped return all record including deleted record, refer Soft Delete https://jinzhu.github.io/gorm/crud.html#soft-delete
|
||||
func (s *DB) Unscoped() *DB {
|
||||
return s.clone().search.unscoped().db
|
||||
}
|
||||
|
||||
// Attrs initialize struct with argument if record not found with `FirstOrInit` https://jinzhu.github.io/gorm/crud.html#firstorinit or `FirstOrCreate` https://jinzhu.github.io/gorm/crud.html#firstorcreate
|
||||
func (s *DB) Attrs(attrs ...interface{}) *DB {
|
||||
return s.clone().search.Attrs(attrs...).db
|
||||
}
|
||||
|
||||
// Assign assign result with argument regardless it is found or not with `FirstOrInit` https://jinzhu.github.io/gorm/crud.html#firstorinit or `FirstOrCreate` https://jinzhu.github.io/gorm/crud.html#firstorcreate
|
||||
func (s *DB) Assign(attrs ...interface{}) *DB {
|
||||
return s.clone().search.Assign(attrs...).db
|
||||
}
|
||||
|
||||
// First find first record that match given conditions, order by primary key
|
||||
func (s *DB) First(out interface{}, where ...interface{}) *DB {
|
||||
newScope := s.NewScope(out)
|
||||
newScope.Search.Limit(1)
|
||||
|
||||
return newScope.Set("gorm:order_by_primary_key", "ASC").
|
||||
inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db
|
||||
}
|
||||
|
||||
// Take return a record that match given conditions, the order will depend on the database implementation
|
||||
func (s *DB) Take(out interface{}, where ...interface{}) *DB {
|
||||
newScope := s.NewScope(out)
|
||||
newScope.Search.Limit(1)
|
||||
return newScope.inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db
|
||||
}
|
||||
|
||||
// Last find last record that match given conditions, order by primary key
|
||||
func (s *DB) Last(out interface{}, where ...interface{}) *DB {
|
||||
newScope := s.NewScope(out)
|
||||
newScope.Search.Limit(1)
|
||||
return newScope.Set("gorm:order_by_primary_key", "DESC").
|
||||
inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db
|
||||
}
|
||||
|
||||
// Find find records that match given conditions
|
||||
func (s *DB) Find(out interface{}, where ...interface{}) *DB {
|
||||
return s.NewScope(out).inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db
|
||||
}
|
||||
|
||||
//Preloads preloads relations, don`t touch out
|
||||
func (s *DB) Preloads(out interface{}) *DB {
|
||||
return s.NewScope(out).InstanceSet("gorm:only_preload", 1).callCallbacks(s.parent.callbacks.queries).db
|
||||
}
|
||||
|
||||
// Scan scan value to a struct
|
||||
func (s *DB) Scan(dest interface{}) *DB {
|
||||
return s.NewScope(s.Value).Set("gorm:query_destination", dest).callCallbacks(s.parent.callbacks.queries).db
|
||||
}
|
||||
|
||||
// Row return `*sql.Row` with given conditions
|
||||
func (s *DB) Row() *sql.Row {
|
||||
return s.NewScope(s.Value).row()
|
||||
}
|
||||
|
||||
// Rows return `*sql.Rows` with given conditions
|
||||
func (s *DB) Rows() (*sql.Rows, error) {
|
||||
return s.NewScope(s.Value).rows()
|
||||
}
|
||||
|
||||
// ScanRows scan `*sql.Rows` to give struct
|
||||
func (s *DB) ScanRows(rows *sql.Rows, result interface{}) error {
|
||||
var (
|
||||
scope = s.NewScope(result)
|
||||
clone = scope.db
|
||||
columns, err = rows.Columns()
|
||||
)
|
||||
|
||||
if clone.AddError(err) == nil {
|
||||
scope.scan(rows, columns, scope.Fields())
|
||||
}
|
||||
|
||||
return clone.Error
|
||||
}
|
||||
|
||||
// Pluck used to query single column from a model as a map
|
||||
// var ages []int64
|
||||
// db.Find(&users).Pluck("age", &ages)
|
||||
func (s *DB) Pluck(column string, value interface{}) *DB {
|
||||
return s.NewScope(s.Value).pluck(column, value).db
|
||||
}
|
||||
|
||||
// Count get how many records for a model
|
||||
func (s *DB) Count(value interface{}) *DB {
|
||||
return s.NewScope(s.Value).count(value).db
|
||||
}
|
||||
|
||||
// Related get related associations
|
||||
func (s *DB) Related(value interface{}, foreignKeys ...string) *DB {
|
||||
return s.NewScope(s.Value).related(value, foreignKeys...).db
|
||||
}
|
||||
|
||||
// FirstOrInit find first matched record or initialize a new one with given conditions (only works with struct, map conditions)
|
||||
// https://jinzhu.github.io/gorm/crud.html#firstorinit
|
||||
func (s *DB) FirstOrInit(out interface{}, where ...interface{}) *DB {
|
||||
c := s.clone()
|
||||
if result := c.First(out, where...); result.Error != nil {
|
||||
if !result.RecordNotFound() {
|
||||
return result
|
||||
}
|
||||
c.NewScope(out).inlineCondition(where...).initialize()
|
||||
} else {
|
||||
c.NewScope(out).updatedAttrsWithValues(c.search.assignAttrs)
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// FirstOrCreate find first matched record or create a new one with given conditions (only works with struct, map conditions)
|
||||
// https://jinzhu.github.io/gorm/crud.html#firstorcreate
|
||||
func (s *DB) FirstOrCreate(out interface{}, where ...interface{}) *DB {
|
||||
c := s.clone()
|
||||
if result := s.First(out, where...); result.Error != nil {
|
||||
if !result.RecordNotFound() {
|
||||
return result
|
||||
}
|
||||
return c.NewScope(out).inlineCondition(where...).initialize().callCallbacks(c.parent.callbacks.creates).db
|
||||
} else if len(c.search.assignAttrs) > 0 {
|
||||
return c.NewScope(out).InstanceSet("gorm:update_interface", c.search.assignAttrs).callCallbacks(c.parent.callbacks.updates).db
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// Update update attributes with callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update
|
||||
// WARNING when update with struct, GORM will not update fields that with zero value
|
||||
func (s *DB) Update(attrs ...interface{}) *DB {
|
||||
return s.Updates(toSearchableMap(attrs...), true)
|
||||
}
|
||||
|
||||
// Updates update attributes with callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update
|
||||
func (s *DB) Updates(values interface{}, ignoreProtectedAttrs ...bool) *DB {
|
||||
return s.NewScope(s.Value).
|
||||
Set("gorm:ignore_protected_attrs", len(ignoreProtectedAttrs) > 0).
|
||||
InstanceSet("gorm:update_interface", values).
|
||||
callCallbacks(s.parent.callbacks.updates).db
|
||||
}
|
||||
|
||||
// UpdateColumn update attributes without callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update
|
||||
func (s *DB) UpdateColumn(attrs ...interface{}) *DB {
|
||||
return s.UpdateColumns(toSearchableMap(attrs...))
|
||||
}
|
||||
|
||||
// UpdateColumns update attributes without callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update
|
||||
func (s *DB) UpdateColumns(values interface{}) *DB {
|
||||
return s.NewScope(s.Value).
|
||||
Set("gorm:update_column", true).
|
||||
Set("gorm:save_associations", false).
|
||||
InstanceSet("gorm:update_interface", values).
|
||||
callCallbacks(s.parent.callbacks.updates).db
|
||||
}
|
||||
|
||||
// Save update value in database, if the value doesn't have primary key, will insert it
|
||||
func (s *DB) Save(value interface{}) *DB {
|
||||
scope := s.NewScope(value)
|
||||
if !scope.PrimaryKeyZero() {
|
||||
newDB := scope.callCallbacks(s.parent.callbacks.updates).db
|
||||
if newDB.Error == nil && newDB.RowsAffected == 0 {
|
||||
return s.New().Table(scope.TableName()).FirstOrCreate(value)
|
||||
}
|
||||
return newDB
|
||||
}
|
||||
return scope.callCallbacks(s.parent.callbacks.creates).db
|
||||
}
|
||||
|
||||
// Create insert the value into database
|
||||
func (s *DB) Create(value interface{}) *DB {
|
||||
scope := s.NewScope(value)
|
||||
return scope.callCallbacks(s.parent.callbacks.creates).db
|
||||
}
|
||||
|
||||
// Delete delete value match given conditions, if the value has primary key, then will including the primary key as condition
|
||||
// WARNING If model has DeletedAt field, GORM will only set field DeletedAt's value to current time
|
||||
func (s *DB) Delete(value interface{}, where ...interface{}) *DB {
|
||||
return s.NewScope(value).inlineCondition(where...).callCallbacks(s.parent.callbacks.deletes).db
|
||||
}
|
||||
|
||||
// Raw use raw sql as conditions, won't run it unless invoked by other methods
|
||||
// db.Raw("SELECT name, age FROM users WHERE name = ?", 3).Scan(&result)
|
||||
func (s *DB) Raw(sql string, values ...interface{}) *DB {
|
||||
return s.clone().search.Raw(true).Where(sql, values...).db
|
||||
}
|
||||
|
||||
// Exec execute raw sql
|
||||
func (s *DB) Exec(sql string, values ...interface{}) *DB {
|
||||
scope := s.NewScope(nil)
|
||||
generatedSQL := scope.buildCondition(map[string]interface{}{"query": sql, "args": values}, true)
|
||||
generatedSQL = strings.TrimSuffix(strings.TrimPrefix(generatedSQL, "("), ")")
|
||||
scope.Raw(generatedSQL)
|
||||
return scope.Exec().db
|
||||
}
|
||||
|
||||
// Model specify the model you would like to run db operations
|
||||
// // update all users's name to `hello`
|
||||
// db.Model(&User{}).Update("name", "hello")
|
||||
// // if user's primary key is non-blank, will use it as condition, then will only update the user's name to `hello`
|
||||
// db.Model(&user).Update("name", "hello")
|
||||
func (s *DB) Model(value interface{}) *DB {
|
||||
c := s.clone()
|
||||
c.Value = value
|
||||
return c
|
||||
}
|
||||
|
||||
// Table specify the table you would like to run db operations
|
||||
func (s *DB) Table(name string) *DB {
|
||||
clone := s.clone()
|
||||
clone.search.Table(name)
|
||||
clone.Value = nil
|
||||
return clone
|
||||
}
|
||||
|
||||
// Debug start debug mode
|
||||
func (s *DB) Debug() *DB {
|
||||
return s.clone().LogMode(true)
|
||||
}
|
||||
|
||||
// Transaction start a transaction as a block,
|
||||
// return error will rollback, otherwise to commit.
|
||||
func (s *DB) Transaction(fc func(tx *DB) error) (err error) {
|
||||
panicked := true
|
||||
tx := s.Begin()
|
||||
defer func() {
|
||||
// Make sure to rollback when panic, Block error or Commit error
|
||||
if panicked || err != nil {
|
||||
tx.Rollback()
|
||||
}
|
||||
}()
|
||||
|
||||
err = fc(tx)
|
||||
|
||||
if err == nil {
|
||||
err = tx.Commit().Error
|
||||
}
|
||||
|
||||
panicked = false
|
||||
return
|
||||
}
|
||||
|
||||
// Begin begins a transaction
|
||||
func (s *DB) Begin() *DB {
|
||||
return s.BeginTx(context.Background(), &sql.TxOptions{})
|
||||
}
|
||||
|
||||
// BeginTx begins a transaction with options
|
||||
func (s *DB) BeginTx(ctx context.Context, opts *sql.TxOptions) *DB {
|
||||
c := s.clone()
|
||||
if db, ok := c.db.(sqlDb); ok && db != nil {
|
||||
tx, err := db.BeginTx(ctx, opts)
|
||||
c.db = interface{}(tx).(SQLCommon)
|
||||
|
||||
c.dialect.SetDB(c.db)
|
||||
c.AddError(err)
|
||||
} else {
|
||||
c.AddError(ErrCantStartTransaction)
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// Commit commit a transaction
|
||||
func (s *DB) Commit() *DB {
|
||||
var emptySQLTx *sql.Tx
|
||||
if db, ok := s.db.(sqlTx); ok && db != nil && db != emptySQLTx {
|
||||
s.AddError(db.Commit())
|
||||
} else {
|
||||
s.AddError(ErrInvalidTransaction)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Rollback rollback a transaction
|
||||
func (s *DB) Rollback() *DB {
|
||||
var emptySQLTx *sql.Tx
|
||||
if db, ok := s.db.(sqlTx); ok && db != nil && db != emptySQLTx {
|
||||
if err := db.Rollback(); err != nil && err != sql.ErrTxDone {
|
||||
s.AddError(err)
|
||||
}
|
||||
} else {
|
||||
s.AddError(ErrInvalidTransaction)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// RollbackUnlessCommitted rollback a transaction if it has not yet been
|
||||
// committed.
|
||||
func (s *DB) RollbackUnlessCommitted() *DB {
|
||||
var emptySQLTx *sql.Tx
|
||||
if db, ok := s.db.(sqlTx); ok && db != nil && db != emptySQLTx {
|
||||
err := db.Rollback()
|
||||
// Ignore the error indicating that the transaction has already
|
||||
// been committed.
|
||||
if err != sql.ErrTxDone {
|
||||
s.AddError(err)
|
||||
}
|
||||
} else {
|
||||
s.AddError(ErrInvalidTransaction)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// NewRecord check if value's primary key is blank
|
||||
func (s *DB) NewRecord(value interface{}) bool {
|
||||
return s.NewScope(value).PrimaryKeyZero()
|
||||
}
|
||||
|
||||
// RecordNotFound check if returning ErrRecordNotFound error
|
||||
func (s *DB) RecordNotFound() bool {
|
||||
for _, err := range s.GetErrors() {
|
||||
if err == ErrRecordNotFound {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// CreateTable create table for models
|
||||
func (s *DB) CreateTable(models ...interface{}) *DB {
|
||||
db := s.Unscoped()
|
||||
for _, model := range models {
|
||||
db = db.NewScope(model).createTable().db
|
||||
}
|
||||
return db
|
||||
}
|
||||
|
||||
// DropTable drop table for models
|
||||
func (s *DB) DropTable(values ...interface{}) *DB {
|
||||
db := s.clone()
|
||||
for _, value := range values {
|
||||
if tableName, ok := value.(string); ok {
|
||||
db = db.Table(tableName)
|
||||
}
|
||||
|
||||
db = db.NewScope(value).dropTable().db
|
||||
}
|
||||
return db
|
||||
}
|
||||
|
||||
// DropTableIfExists drop table if it is exist
|
||||
func (s *DB) DropTableIfExists(values ...interface{}) *DB {
|
||||
db := s.clone()
|
||||
for _, value := range values {
|
||||
if s.HasTable(value) {
|
||||
db.AddError(s.DropTable(value).Error)
|
||||
}
|
||||
}
|
||||
return db
|
||||
}
|
||||
|
||||
// HasTable check has table or not
|
||||
func (s *DB) HasTable(value interface{}) bool {
|
||||
var (
|
||||
scope = s.NewScope(value)
|
||||
tableName string
|
||||
)
|
||||
|
||||
if name, ok := value.(string); ok {
|
||||
tableName = name
|
||||
} else {
|
||||
tableName = scope.TableName()
|
||||
}
|
||||
|
||||
has := scope.Dialect().HasTable(tableName)
|
||||
s.AddError(scope.db.Error)
|
||||
return has
|
||||
}
|
||||
|
||||
// AutoMigrate run auto migration for given models, will only add missing fields, won't delete/change current data
|
||||
func (s *DB) AutoMigrate(values ...interface{}) *DB {
|
||||
db := s.Unscoped()
|
||||
for _, value := range values {
|
||||
db = db.NewScope(value).autoMigrate().db
|
||||
}
|
||||
return db
|
||||
}
|
||||
|
||||
// ModifyColumn modify column to type
|
||||
func (s *DB) ModifyColumn(column string, typ string) *DB {
|
||||
scope := s.NewScope(s.Value)
|
||||
scope.modifyColumn(column, typ)
|
||||
return scope.db
|
||||
}
|
||||
|
||||
// DropColumn drop a column
|
||||
func (s *DB) DropColumn(column string) *DB {
|
||||
scope := s.NewScope(s.Value)
|
||||
scope.dropColumn(column)
|
||||
return scope.db
|
||||
}
|
||||
|
||||
// AddIndex add index for columns with given name
|
||||
func (s *DB) AddIndex(indexName string, columns ...string) *DB {
|
||||
scope := s.Unscoped().NewScope(s.Value)
|
||||
scope.addIndex(false, indexName, columns...)
|
||||
return scope.db
|
||||
}
|
||||
|
||||
// AddUniqueIndex add unique index for columns with given name
|
||||
func (s *DB) AddUniqueIndex(indexName string, columns ...string) *DB {
|
||||
scope := s.Unscoped().NewScope(s.Value)
|
||||
scope.addIndex(true, indexName, columns...)
|
||||
return scope.db
|
||||
}
|
||||
|
||||
// RemoveIndex remove index with name
|
||||
func (s *DB) RemoveIndex(indexName string) *DB {
|
||||
scope := s.NewScope(s.Value)
|
||||
scope.removeIndex(indexName)
|
||||
return scope.db
|
||||
}
|
||||
|
||||
// AddForeignKey Add foreign key to the given scope, e.g:
|
||||
// db.Model(&User{}).AddForeignKey("city_id", "cities(id)", "RESTRICT", "RESTRICT")
|
||||
func (s *DB) AddForeignKey(field string, dest string, onDelete string, onUpdate string) *DB {
|
||||
scope := s.NewScope(s.Value)
|
||||
scope.addForeignKey(field, dest, onDelete, onUpdate)
|
||||
return scope.db
|
||||
}
|
||||
|
||||
// RemoveForeignKey Remove foreign key from the given scope, e.g:
|
||||
// db.Model(&User{}).RemoveForeignKey("city_id", "cities(id)")
|
||||
func (s *DB) RemoveForeignKey(field string, dest string) *DB {
|
||||
scope := s.clone().NewScope(s.Value)
|
||||
scope.removeForeignKey(field, dest)
|
||||
return scope.db
|
||||
}
|
||||
|
||||
// Association start `Association Mode` to handler relations things easir in that mode, refer: https://jinzhu.github.io/gorm/associations.html#association-mode
|
||||
func (s *DB) Association(column string) *Association {
|
||||
var err error
|
||||
var scope = s.Set("gorm:association:source", s.Value).NewScope(s.Value)
|
||||
|
||||
if primaryField := scope.PrimaryField(); primaryField.IsBlank {
|
||||
err = errors.New("primary key can't be nil")
|
||||
} else {
|
||||
if field, ok := scope.FieldByName(column); ok {
|
||||
if field.Relationship == nil || len(field.Relationship.ForeignFieldNames) == 0 {
|
||||
err = fmt.Errorf("invalid association %v for %v", column, scope.IndirectValue().Type())
|
||||
} else {
|
||||
return &Association{scope: scope, column: column, field: field}
|
||||
}
|
||||
} else {
|
||||
err = fmt.Errorf("%v doesn't have column %v", scope.IndirectValue().Type(), column)
|
||||
}
|
||||
}
|
||||
|
||||
return &Association{Error: err}
|
||||
}
|
||||
|
||||
// Preload preload associations with given conditions
|
||||
// db.Preload("Orders", "state NOT IN (?)", "cancelled").Find(&users)
|
||||
func (s *DB) Preload(column string, conditions ...interface{}) *DB {
|
||||
return s.clone().search.Preload(column, conditions...).db
|
||||
}
|
||||
|
||||
// Set set setting by name, which could be used in callbacks, will clone a new db, and update its setting
|
||||
func (s *DB) Set(name string, value interface{}) *DB {
|
||||
return s.clone().InstantSet(name, value)
|
||||
}
|
||||
|
||||
// InstantSet instant set setting, will affect current db
|
||||
func (s *DB) InstantSet(name string, value interface{}) *DB {
|
||||
s.values.Store(name, value)
|
||||
return s
|
||||
}
|
||||
|
||||
// Get get setting by name
|
||||
func (s *DB) Get(name string) (value interface{}, ok bool) {
|
||||
value, ok = s.values.Load(name)
|
||||
return
|
||||
}
|
||||
|
||||
// SetJoinTableHandler set a model's join table handler for a relation
|
||||
func (s *DB) SetJoinTableHandler(source interface{}, column string, handler JoinTableHandlerInterface) {
|
||||
scope := s.NewScope(source)
|
||||
for _, field := range scope.GetModelStruct().StructFields {
|
||||
if field.Name == column || field.DBName == column {
|
||||
if many2many, _ := field.TagSettingsGet("MANY2MANY"); many2many != "" {
|
||||
source := (&Scope{Value: source}).GetModelStruct().ModelType
|
||||
destination := (&Scope{Value: reflect.New(field.Struct.Type).Interface()}).GetModelStruct().ModelType
|
||||
handler.Setup(field.Relationship, many2many, source, destination)
|
||||
field.Relationship.JoinTableHandler = handler
|
||||
if table := handler.Table(s); scope.Dialect().HasTable(table) {
|
||||
s.Table(table).AutoMigrate(handler)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// AddError add error to the db
|
||||
func (s *DB) AddError(err error) error {
|
||||
if err != nil {
|
||||
if err != ErrRecordNotFound {
|
||||
if s.logMode == defaultLogMode {
|
||||
go s.print("error", fileWithLineNum(), err)
|
||||
} else {
|
||||
s.log(err)
|
||||
}
|
||||
|
||||
errors := Errors(s.GetErrors())
|
||||
errors = errors.Add(err)
|
||||
if len(errors) > 1 {
|
||||
err = errors
|
||||
}
|
||||
}
|
||||
|
||||
s.Error = err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// GetErrors get happened errors from the db
|
||||
func (s *DB) GetErrors() []error {
|
||||
if errs, ok := s.Error.(Errors); ok {
|
||||
return errs
|
||||
} else if s.Error != nil {
|
||||
return []error{s.Error}
|
||||
}
|
||||
return []error{}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// Private Methods For DB
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
func (s *DB) clone() *DB {
|
||||
db := &DB{
|
||||
db: s.db,
|
||||
parent: s.parent,
|
||||
logger: s.logger,
|
||||
logMode: s.logMode,
|
||||
Value: s.Value,
|
||||
Error: s.Error,
|
||||
blockGlobalUpdate: s.blockGlobalUpdate,
|
||||
dialect: newDialect(s.dialect.GetName(), s.db),
|
||||
nowFuncOverride: s.nowFuncOverride,
|
||||
}
|
||||
|
||||
s.values.Range(func(k, v interface{}) bool {
|
||||
db.values.Store(k, v)
|
||||
return true
|
||||
})
|
||||
|
||||
if s.search == nil {
|
||||
db.search = &search{limit: -1, offset: -1}
|
||||
} else {
|
||||
db.search = s.search.clone()
|
||||
}
|
||||
|
||||
db.search.db = db
|
||||
return db
|
||||
}
|
||||
|
||||
func (s *DB) print(v ...interface{}) {
|
||||
s.logger.Print(v...)
|
||||
}
|
||||
|
||||
func (s *DB) log(v ...interface{}) {
|
||||
if s != nil && s.logMode == detailedLogMode {
|
||||
s.print(append([]interface{}{"log", fileWithLineNum()}, v...)...)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DB) slog(sql string, t time.Time, vars ...interface{}) {
|
||||
if s.logMode == detailedLogMode {
|
||||
s.print("sql", fileWithLineNum(), NowFunc().Sub(t), sql, vars, s.RowsAffected)
|
||||
}
|
||||
}
|
14 vendor/github.com/jinzhu/gorm/model.go (generated, vendored, new file)
@ -0,0 +1,14 @@
package gorm

import "time"

// Model base model definition, including fields `ID`, `CreatedAt`, `UpdatedAt`, `DeletedAt`, which could be embedded in your models
//    type User struct {
//      gorm.Model
//    }
type Model struct {
	ID        uint `gorm:"primary_key"`
	CreatedAt time.Time
	UpdatedAt time.Time
	DeletedAt *time.Time `sql:"index"`
}
671 vendor/github.com/jinzhu/gorm/model_struct.go (generated, vendored, new file)
@ -0,0 +1,671 @@
|
||||
package gorm
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"go/ast"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/jinzhu/inflection"
|
||||
)
|
||||
|
||||
// DefaultTableNameHandler default table name handler
|
||||
var DefaultTableNameHandler = func(db *DB, defaultTableName string) string {
|
||||
return defaultTableName
|
||||
}
|
||||
|
||||
// lock for mutating global cached model metadata
|
||||
var structsLock sync.Mutex
|
||||
|
||||
// global cache of model metadata
|
||||
var modelStructsMap sync.Map
|
||||
|
||||
// ModelStruct model definition
|
||||
type ModelStruct struct {
|
||||
PrimaryFields []*StructField
|
||||
StructFields []*StructField
|
||||
ModelType reflect.Type
|
||||
|
||||
defaultTableName string
|
||||
l sync.Mutex
|
||||
}
|
||||
|
||||
// TableName returns model's table name
|
||||
func (s *ModelStruct) TableName(db *DB) string {
|
||||
s.l.Lock()
|
||||
defer s.l.Unlock()
|
||||
|
||||
if s.defaultTableName == "" && db != nil && s.ModelType != nil {
|
||||
// Set default table name
|
||||
if tabler, ok := reflect.New(s.ModelType).Interface().(tabler); ok {
|
||||
s.defaultTableName = tabler.TableName()
|
||||
} else {
|
||||
tableName := ToTableName(s.ModelType.Name())
|
||||
db.parent.RLock()
|
||||
if db == nil || (db.parent != nil && !db.parent.singularTable) {
|
||||
tableName = inflection.Plural(tableName)
|
||||
}
|
||||
db.parent.RUnlock()
|
||||
s.defaultTableName = tableName
|
||||
}
|
||||
}
|
||||
|
||||
return DefaultTableNameHandler(db, s.defaultTableName)
|
||||
}
|
||||
|
||||
// StructField model field's struct definition
|
||||
type StructField struct {
|
||||
DBName string
|
||||
Name string
|
||||
Names []string
|
||||
IsPrimaryKey bool
|
||||
IsNormal bool
|
||||
IsIgnored bool
|
||||
IsScanner bool
|
||||
HasDefaultValue bool
|
||||
Tag reflect.StructTag
|
||||
TagSettings map[string]string
|
||||
Struct reflect.StructField
|
||||
IsForeignKey bool
|
||||
Relationship *Relationship
|
||||
|
||||
tagSettingsLock sync.RWMutex
|
||||
}
|
||||
|
||||
// TagSettingsSet Sets a tag in the tag settings map
|
||||
func (sf *StructField) TagSettingsSet(key, val string) {
|
||||
sf.tagSettingsLock.Lock()
|
||||
defer sf.tagSettingsLock.Unlock()
|
||||
sf.TagSettings[key] = val
|
||||
}
|
||||
|
||||
// TagSettingsGet returns a tag from the tag settings
|
||||
func (sf *StructField) TagSettingsGet(key string) (string, bool) {
|
||||
sf.tagSettingsLock.RLock()
|
||||
defer sf.tagSettingsLock.RUnlock()
|
||||
val, ok := sf.TagSettings[key]
|
||||
return val, ok
|
||||
}
|
||||
|
||||
// TagSettingsDelete deletes a tag
|
||||
func (sf *StructField) TagSettingsDelete(key string) {
|
||||
sf.tagSettingsLock.Lock()
|
||||
defer sf.tagSettingsLock.Unlock()
|
||||
delete(sf.TagSettings, key)
|
||||
}
|
||||
|
||||
func (sf *StructField) clone() *StructField {
|
||||
clone := &StructField{
|
||||
DBName: sf.DBName,
|
||||
Name: sf.Name,
|
||||
Names: sf.Names,
|
||||
IsPrimaryKey: sf.IsPrimaryKey,
|
||||
IsNormal: sf.IsNormal,
|
||||
IsIgnored: sf.IsIgnored,
|
||||
IsScanner: sf.IsScanner,
|
||||
HasDefaultValue: sf.HasDefaultValue,
|
||||
Tag: sf.Tag,
|
||||
TagSettings: map[string]string{},
|
||||
Struct: sf.Struct,
|
||||
IsForeignKey: sf.IsForeignKey,
|
||||
}
|
||||
|
||||
if sf.Relationship != nil {
|
||||
relationship := *sf.Relationship
|
||||
clone.Relationship = &relationship
|
||||
}
|
||||
|
||||
// copy the struct field tagSettings; the settings map is locked while it is copied
|
||||
sf.tagSettingsLock.Lock()
|
||||
defer sf.tagSettingsLock.Unlock()
|
||||
for key, value := range sf.TagSettings {
|
||||
clone.TagSettings[key] = value
|
||||
}
|
||||
|
||||
return clone
|
||||
}
|
||||
|
||||
// Relationship described the relationship between models
|
||||
type Relationship struct {
|
||||
Kind string
|
||||
PolymorphicType string
|
||||
PolymorphicDBName string
|
||||
PolymorphicValue string
|
||||
ForeignFieldNames []string
|
||||
ForeignDBNames []string
|
||||
AssociationForeignFieldNames []string
|
||||
AssociationForeignDBNames []string
|
||||
JoinTableHandler JoinTableHandlerInterface
|
||||
}
|
||||
|
||||
func getForeignField(column string, fields []*StructField) *StructField {
|
||||
for _, field := range fields {
|
||||
if field.Name == column || field.DBName == column || field.DBName == ToColumnName(column) {
|
||||
return field
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetModelStruct get value's model struct, relationships based on struct and tag definition
|
||||
func (scope *Scope) GetModelStruct() *ModelStruct {
|
||||
var modelStruct ModelStruct
|
||||
// Scope value can't be nil
|
||||
if scope.Value == nil {
|
||||
return &modelStruct
|
||||
}
|
||||
|
||||
reflectType := reflect.ValueOf(scope.Value).Type()
|
||||
for reflectType.Kind() == reflect.Slice || reflectType.Kind() == reflect.Ptr {
|
||||
reflectType = reflectType.Elem()
|
||||
}
|
||||
|
||||
// Scope value need to be a struct
|
||||
if reflectType.Kind() != reflect.Struct {
|
||||
return &modelStruct
|
||||
}
|
||||
|
||||
// Get Cached model struct
|
||||
isSingularTable := false
|
||||
if scope.db != nil && scope.db.parent != nil {
|
||||
scope.db.parent.RLock()
|
||||
isSingularTable = scope.db.parent.singularTable
|
||||
scope.db.parent.RUnlock()
|
||||
}
|
||||
|
||||
hashKey := struct {
|
||||
singularTable bool
|
||||
reflectType reflect.Type
|
||||
}{isSingularTable, reflectType}
|
||||
if value, ok := modelStructsMap.Load(hashKey); ok && value != nil {
|
||||
return value.(*ModelStruct)
|
||||
}
|
||||
|
||||
modelStruct.ModelType = reflectType
|
||||
|
||||
// Get all fields
|
||||
for i := 0; i < reflectType.NumField(); i++ {
|
||||
if fieldStruct := reflectType.Field(i); ast.IsExported(fieldStruct.Name) {
|
||||
field := &StructField{
|
||||
Struct: fieldStruct,
|
||||
Name: fieldStruct.Name,
|
||||
Names: []string{fieldStruct.Name},
|
||||
Tag: fieldStruct.Tag,
|
||||
TagSettings: parseTagSetting(fieldStruct.Tag),
|
||||
}
|
||||
|
||||
// is ignored field
|
||||
if _, ok := field.TagSettingsGet("-"); ok {
|
||||
field.IsIgnored = true
|
||||
} else {
|
||||
if _, ok := field.TagSettingsGet("PRIMARY_KEY"); ok {
|
||||
field.IsPrimaryKey = true
|
||||
modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, field)
|
||||
}
|
||||
|
||||
if _, ok := field.TagSettingsGet("DEFAULT"); ok && !field.IsPrimaryKey {
|
||||
field.HasDefaultValue = true
|
||||
}
|
||||
|
||||
if _, ok := field.TagSettingsGet("AUTO_INCREMENT"); ok && !field.IsPrimaryKey {
|
||||
field.HasDefaultValue = true
|
||||
}
|
||||
|
||||
indirectType := fieldStruct.Type
|
||||
for indirectType.Kind() == reflect.Ptr {
|
||||
indirectType = indirectType.Elem()
|
||||
}
|
||||
|
||||
fieldValue := reflect.New(indirectType).Interface()
|
||||
if _, isScanner := fieldValue.(sql.Scanner); isScanner {
|
||||
// is scanner
|
||||
field.IsScanner, field.IsNormal = true, true
|
||||
if indirectType.Kind() == reflect.Struct {
|
||||
for i := 0; i < indirectType.NumField(); i++ {
|
||||
for key, value := range parseTagSetting(indirectType.Field(i).Tag) {
|
||||
if _, ok := field.TagSettingsGet(key); !ok {
|
||||
field.TagSettingsSet(key, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if _, isTime := fieldValue.(*time.Time); isTime {
|
||||
// is time
|
||||
field.IsNormal = true
|
||||
} else if _, ok := field.TagSettingsGet("EMBEDDED"); ok || fieldStruct.Anonymous {
|
||||
// is embedded struct
|
||||
for _, subField := range scope.New(fieldValue).GetModelStruct().StructFields {
|
||||
subField = subField.clone()
|
||||
subField.Names = append([]string{fieldStruct.Name}, subField.Names...)
|
||||
if prefix, ok := field.TagSettingsGet("EMBEDDED_PREFIX"); ok {
|
||||
subField.DBName = prefix + subField.DBName
|
||||
}
|
||||
|
||||
if subField.IsPrimaryKey {
|
||||
if _, ok := subField.TagSettingsGet("PRIMARY_KEY"); ok {
|
||||
modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, subField)
|
||||
} else {
|
||||
subField.IsPrimaryKey = false
|
||||
}
|
||||
}
|
||||
|
||||
if subField.Relationship != nil && subField.Relationship.JoinTableHandler != nil {
|
||||
if joinTableHandler, ok := subField.Relationship.JoinTableHandler.(*JoinTableHandler); ok {
|
||||
newJoinTableHandler := &JoinTableHandler{}
|
||||
newJoinTableHandler.Setup(subField.Relationship, joinTableHandler.TableName, reflectType, joinTableHandler.Destination.ModelType)
|
||||
subField.Relationship.JoinTableHandler = newJoinTableHandler
|
||||
}
|
||||
}
|
||||
|
||||
modelStruct.StructFields = append(modelStruct.StructFields, subField)
|
||||
}
|
||||
continue
|
||||
} else {
|
||||
// build relationships
|
||||
switch indirectType.Kind() {
|
||||
case reflect.Slice:
|
||||
defer func(field *StructField) {
|
||||
var (
|
||||
relationship = &Relationship{}
|
||||
toScope = scope.New(reflect.New(field.Struct.Type).Interface())
|
||||
foreignKeys []string
|
||||
associationForeignKeys []string
|
||||
elemType = field.Struct.Type
|
||||
)
|
||||
|
||||
if foreignKey, _ := field.TagSettingsGet("FOREIGNKEY"); foreignKey != "" {
|
||||
foreignKeys = strings.Split(foreignKey, ",")
|
||||
}
|
||||
|
||||
if foreignKey, _ := field.TagSettingsGet("ASSOCIATION_FOREIGNKEY"); foreignKey != "" {
|
||||
associationForeignKeys = strings.Split(foreignKey, ",")
|
||||
} else if foreignKey, _ := field.TagSettingsGet("ASSOCIATIONFOREIGNKEY"); foreignKey != "" {
|
||||
associationForeignKeys = strings.Split(foreignKey, ",")
|
||||
}
|
||||
|
||||
for elemType.Kind() == reflect.Slice || elemType.Kind() == reflect.Ptr {
|
||||
elemType = elemType.Elem()
|
||||
}
|
||||
|
||||
if elemType.Kind() == reflect.Struct {
|
||||
if many2many, _ := field.TagSettingsGet("MANY2MANY"); many2many != "" {
|
||||
relationship.Kind = "many_to_many"
|
||||
|
||||
{ // Foreign Keys for Source
|
||||
joinTableDBNames := []string{}
|
||||
|
||||
if foreignKey, _ := field.TagSettingsGet("JOINTABLE_FOREIGNKEY"); foreignKey != "" {
|
||||
joinTableDBNames = strings.Split(foreignKey, ",")
|
||||
}
|
||||
|
||||
// if no foreign keys defined with tag
|
||||
if len(foreignKeys) == 0 {
|
||||
for _, field := range modelStruct.PrimaryFields {
|
||||
foreignKeys = append(foreignKeys, field.DBName)
|
||||
}
|
||||
}
|
||||
|
||||
for idx, foreignKey := range foreignKeys {
|
||||
if foreignField := getForeignField(foreignKey, modelStruct.StructFields); foreignField != nil {
|
||||
// source foreign keys (db names)
|
||||
relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.DBName)
|
||||
|
||||
// setup join table foreign keys for source
|
||||
if len(joinTableDBNames) > idx {
|
||||
// if defined join table's foreign key
|
||||
relationship.ForeignDBNames = append(relationship.ForeignDBNames, joinTableDBNames[idx])
|
||||
} else {
|
||||
defaultJointableForeignKey := ToColumnName(reflectType.Name()) + "_" + foreignField.DBName
|
||||
relationship.ForeignDBNames = append(relationship.ForeignDBNames, defaultJointableForeignKey)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
{ // Foreign Keys for Association (Destination)
|
||||
associationJoinTableDBNames := []string{}
|
||||
|
||||
if foreignKey, _ := field.TagSettingsGet("ASSOCIATION_JOINTABLE_FOREIGNKEY"); foreignKey != "" {
|
||||
associationJoinTableDBNames = strings.Split(foreignKey, ",")
|
||||
}
|
||||
|
||||
// if no association foreign keys defined with tag
|
||||
if len(associationForeignKeys) == 0 {
|
||||
for _, field := range toScope.PrimaryFields() {
|
||||
associationForeignKeys = append(associationForeignKeys, field.DBName)
|
||||
}
|
||||
}
|
||||
|
||||
for idx, name := range associationForeignKeys {
|
||||
if field, ok := toScope.FieldByName(name); ok {
|
||||
// association foreign keys (db names)
|
||||
relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, field.DBName)
|
||||
|
||||
// setup join table foreign keys for association
|
||||
if len(associationJoinTableDBNames) > idx {
|
||||
relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, associationJoinTableDBNames[idx])
|
||||
} else {
|
||||
// join table foreign keys for association
|
||||
joinTableDBName := ToColumnName(elemType.Name()) + "_" + field.DBName
|
||||
relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, joinTableDBName)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
joinTableHandler := JoinTableHandler{}
|
||||
joinTableHandler.Setup(relationship, many2many, reflectType, elemType)
|
||||
relationship.JoinTableHandler = &joinTableHandler
|
||||
field.Relationship = relationship
|
||||
} else {
|
||||
// User has many comments, associationType is User, comment use UserID as foreign key
|
||||
var associationType = reflectType.Name()
|
||||
var toFields = toScope.GetStructFields()
|
||||
relationship.Kind = "has_many"
|
||||
|
||||
if polymorphic, _ := field.TagSettingsGet("POLYMORPHIC"); polymorphic != "" {
|
||||
// Dog has many toys, tag polymorphic is Owner, then associationType is Owner
|
||||
// Toy use OwnerID, OwnerType ('dogs') as foreign key
|
||||
if polymorphicType := getForeignField(polymorphic+"Type", toFields); polymorphicType != nil {
|
||||
associationType = polymorphic
|
||||
relationship.PolymorphicType = polymorphicType.Name
|
||||
relationship.PolymorphicDBName = polymorphicType.DBName
|
||||
// if Dog has multiple set of toys set name of the set (instead of default 'dogs')
|
||||
if value, ok := field.TagSettingsGet("POLYMORPHIC_VALUE"); ok {
|
||||
relationship.PolymorphicValue = value
|
||||
} else {
|
||||
relationship.PolymorphicValue = scope.TableName()
|
||||
}
|
||||
polymorphicType.IsForeignKey = true
|
||||
}
|
||||
}
|
||||
|
||||
// if no foreign keys defined with tag
|
||||
if len(foreignKeys) == 0 {
|
||||
// if no association foreign keys defined with tag
|
||||
if len(associationForeignKeys) == 0 {
|
||||
for _, field := range modelStruct.PrimaryFields {
|
||||
foreignKeys = append(foreignKeys, associationType+field.Name)
|
||||
associationForeignKeys = append(associationForeignKeys, field.Name)
|
||||
}
|
||||
} else {
|
||||
// generate foreign keys from defined association foreign keys
|
||||
for _, scopeFieldName := range associationForeignKeys {
|
||||
if foreignField := getForeignField(scopeFieldName, modelStruct.StructFields); foreignField != nil {
|
||||
foreignKeys = append(foreignKeys, associationType+foreignField.Name)
|
||||
associationForeignKeys = append(associationForeignKeys, foreignField.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// generate association foreign keys from foreign keys
|
||||
if len(associationForeignKeys) == 0 {
|
||||
for _, foreignKey := range foreignKeys {
|
||||
if strings.HasPrefix(foreignKey, associationType) {
|
||||
associationForeignKey := strings.TrimPrefix(foreignKey, associationType)
|
||||
if foreignField := getForeignField(associationForeignKey, modelStruct.StructFields); foreignField != nil {
|
||||
associationForeignKeys = append(associationForeignKeys, associationForeignKey)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(associationForeignKeys) == 0 && len(foreignKeys) == 1 {
|
||||
associationForeignKeys = []string{scope.PrimaryKey()}
|
||||
}
|
||||
} else if len(foreignKeys) != len(associationForeignKeys) {
|
||||
scope.Err(errors.New("invalid foreign keys, should have same length"))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
for idx, foreignKey := range foreignKeys {
|
||||
if foreignField := getForeignField(foreignKey, toFields); foreignField != nil {
|
||||
if associationField := getForeignField(associationForeignKeys[idx], modelStruct.StructFields); associationField != nil {
|
||||
// mark field as foreignkey, use global lock to avoid race
|
||||
structsLock.Lock()
|
||||
foreignField.IsForeignKey = true
|
||||
structsLock.Unlock()
|
||||
|
||||
// association foreign keys
|
||||
relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, associationField.Name)
|
||||
relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, associationField.DBName)
|
||||
|
||||
// association foreign keys
|
||||
relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name)
|
||||
relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(relationship.ForeignFieldNames) != 0 {
|
||||
field.Relationship = relationship
|
||||
}
|
||||
}
|
||||
} else {
|
||||
field.IsNormal = true
|
||||
}
|
||||
}(field)
|
||||
case reflect.Struct:
|
||||
defer func(field *StructField) {
|
||||
var (
|
||||
// user has one profile, associationType is User, profile use UserID as foreign key
|
||||
// user belongs to profile, associationType is Profile, user use ProfileID as foreign key
|
||||
associationType = reflectType.Name()
|
||||
relationship = &Relationship{}
|
||||
toScope = scope.New(reflect.New(field.Struct.Type).Interface())
|
||||
toFields = toScope.GetStructFields()
|
||||
tagForeignKeys []string
|
||||
tagAssociationForeignKeys []string
|
||||
)
|
||||
|
||||
if foreignKey, _ := field.TagSettingsGet("FOREIGNKEY"); foreignKey != "" {
|
||||
tagForeignKeys = strings.Split(foreignKey, ",")
|
||||
}
|
||||
|
||||
if foreignKey, _ := field.TagSettingsGet("ASSOCIATION_FOREIGNKEY"); foreignKey != "" {
|
||||
tagAssociationForeignKeys = strings.Split(foreignKey, ",")
|
||||
} else if foreignKey, _ := field.TagSettingsGet("ASSOCIATIONFOREIGNKEY"); foreignKey != "" {
|
||||
tagAssociationForeignKeys = strings.Split(foreignKey, ",")
|
||||
}
|
||||
|
||||
if polymorphic, _ := field.TagSettingsGet("POLYMORPHIC"); polymorphic != "" {
|
||||
// Cat has one toy, tag polymorphic is Owner, then associationType is Owner
|
||||
// Toy use OwnerID, OwnerType ('cats') as foreign key
|
||||
if polymorphicType := getForeignField(polymorphic+"Type", toFields); polymorphicType != nil {
|
||||
associationType = polymorphic
|
||||
relationship.PolymorphicType = polymorphicType.Name
|
||||
relationship.PolymorphicDBName = polymorphicType.DBName
|
||||
// if Cat has several different types of toys set name for each (instead of default 'cats')
|
||||
if value, ok := field.TagSettingsGet("POLYMORPHIC_VALUE"); ok {
|
||||
relationship.PolymorphicValue = value
|
||||
} else {
|
||||
relationship.PolymorphicValue = scope.TableName()
|
||||
}
|
||||
polymorphicType.IsForeignKey = true
|
||||
}
|
||||
}
|
||||
|
||||
// Has One
|
||||
{
|
||||
var foreignKeys = tagForeignKeys
|
||||
var associationForeignKeys = tagAssociationForeignKeys
|
||||
// if no foreign keys defined with tag
|
||||
if len(foreignKeys) == 0 {
|
||||
// if no association foreign keys defined with tag
|
||||
if len(associationForeignKeys) == 0 {
|
||||
for _, primaryField := range modelStruct.PrimaryFields {
|
||||
foreignKeys = append(foreignKeys, associationType+primaryField.Name)
|
||||
associationForeignKeys = append(associationForeignKeys, primaryField.Name)
|
||||
}
|
||||
} else {
|
||||
// generate foreign keys from association foreign keys
|
||||
for _, associationForeignKey := range tagAssociationForeignKeys {
|
||||
if foreignField := getForeignField(associationForeignKey, modelStruct.StructFields); foreignField != nil {
|
||||
foreignKeys = append(foreignKeys, associationType+foreignField.Name)
|
||||
associationForeignKeys = append(associationForeignKeys, foreignField.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// generate association foreign keys from foreign keys
|
||||
if len(associationForeignKeys) == 0 {
|
||||
for _, foreignKey := range foreignKeys {
|
||||
if strings.HasPrefix(foreignKey, associationType) {
|
||||
associationForeignKey := strings.TrimPrefix(foreignKey, associationType)
|
||||
if foreignField := getForeignField(associationForeignKey, modelStruct.StructFields); foreignField != nil {
|
||||
associationForeignKeys = append(associationForeignKeys, associationForeignKey)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(associationForeignKeys) == 0 && len(foreignKeys) == 1 {
|
||||
associationForeignKeys = []string{scope.PrimaryKey()}
|
||||
}
|
||||
} else if len(foreignKeys) != len(associationForeignKeys) {
|
||||
scope.Err(errors.New("invalid foreign keys, should have same length"))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
for idx, foreignKey := range foreignKeys {
|
||||
if foreignField := getForeignField(foreignKey, toFields); foreignField != nil {
|
||||
if scopeField := getForeignField(associationForeignKeys[idx], modelStruct.StructFields); scopeField != nil {
|
||||
// mark field as foreignkey, use global lock to avoid race
|
||||
structsLock.Lock()
|
||||
foreignField.IsForeignKey = true
|
||||
structsLock.Unlock()
|
||||
|
||||
// association foreign keys
|
||||
relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, scopeField.Name)
|
||||
relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, scopeField.DBName)
|
||||
|
||||
// association foreign keys
|
||||
relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name)
|
||||
relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(relationship.ForeignFieldNames) != 0 {
|
||||
relationship.Kind = "has_one"
|
||||
field.Relationship = relationship
|
||||
} else {
|
||||
var foreignKeys = tagForeignKeys
|
||||
var associationForeignKeys = tagAssociationForeignKeys
|
||||
|
||||
if len(foreignKeys) == 0 {
|
||||
// generate foreign keys & association foreign keys
|
||||
if len(associationForeignKeys) == 0 {
|
||||
for _, primaryField := range toScope.PrimaryFields() {
|
||||
foreignKeys = append(foreignKeys, field.Name+primaryField.Name)
|
||||
associationForeignKeys = append(associationForeignKeys, primaryField.Name)
|
||||
}
|
||||
} else {
|
||||
// generate foreign keys with association foreign keys
|
||||
for _, associationForeignKey := range associationForeignKeys {
|
||||
if foreignField := getForeignField(associationForeignKey, toFields); foreignField != nil {
|
||||
foreignKeys = append(foreignKeys, field.Name+foreignField.Name)
|
||||
associationForeignKeys = append(associationForeignKeys, foreignField.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// generate foreign keys & association foreign keys
|
||||
if len(associationForeignKeys) == 0 {
|
||||
for _, foreignKey := range foreignKeys {
|
||||
if strings.HasPrefix(foreignKey, field.Name) {
|
||||
associationForeignKey := strings.TrimPrefix(foreignKey, field.Name)
|
||||
if foreignField := getForeignField(associationForeignKey, toFields); foreignField != nil {
|
||||
associationForeignKeys = append(associationForeignKeys, associationForeignKey)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(associationForeignKeys) == 0 && len(foreignKeys) == 1 {
|
||||
associationForeignKeys = []string{toScope.PrimaryKey()}
|
||||
}
|
||||
} else if len(foreignKeys) != len(associationForeignKeys) {
|
||||
scope.Err(errors.New("invalid foreign keys, should have same length"))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
for idx, foreignKey := range foreignKeys {
|
||||
if foreignField := getForeignField(foreignKey, modelStruct.StructFields); foreignField != nil {
|
||||
if associationField := getForeignField(associationForeignKeys[idx], toFields); associationField != nil {
|
||||
// mark field as foreignkey, use global lock to avoid race
|
||||
structsLock.Lock()
|
||||
foreignField.IsForeignKey = true
|
||||
structsLock.Unlock()
|
||||
|
||||
// association foreign keys
|
||||
relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, associationField.Name)
|
||||
relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, associationField.DBName)
|
||||
|
||||
// source foreign keys
|
||||
relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name)
|
||||
relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(relationship.ForeignFieldNames) != 0 {
|
||||
relationship.Kind = "belongs_to"
|
||||
field.Relationship = relationship
|
||||
}
|
||||
}
|
||||
}(field)
|
||||
default:
|
||||
field.IsNormal = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Even if it is ignored, it is still possible to decode a db value into the field
|
||||
if value, ok := field.TagSettingsGet("COLUMN"); ok {
|
||||
field.DBName = value
|
||||
} else {
|
||||
field.DBName = ToColumnName(fieldStruct.Name)
|
||||
}
|
||||
|
||||
modelStruct.StructFields = append(modelStruct.StructFields, field)
|
||||
}
|
||||
}
|
||||
|
||||
if len(modelStruct.PrimaryFields) == 0 {
|
||||
if field := getForeignField("id", modelStruct.StructFields); field != nil {
|
||||
field.IsPrimaryKey = true
|
||||
modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, field)
|
||||
}
|
||||
}
|
||||
|
||||
modelStructsMap.Store(hashKey, &modelStruct)
|
||||
|
||||
return &modelStruct
|
||||
}
|
||||
|
||||
// GetStructFields get model's field structs
|
||||
func (scope *Scope) GetStructFields() (fields []*StructField) {
|
||||
return scope.GetModelStruct().StructFields
|
||||
}
|
||||
|
||||
func parseTagSetting(tags reflect.StructTag) map[string]string {
|
||||
setting := map[string]string{}
|
||||
for _, str := range []string{tags.Get("sql"), tags.Get("gorm")} {
|
||||
if str == "" {
|
||||
continue
|
||||
}
|
||||
tags := strings.Split(str, ";")
|
||||
for _, value := range tags {
|
||||
v := strings.Split(value, ":")
|
||||
k := strings.TrimSpace(strings.ToUpper(v[0]))
|
||||
if len(v) >= 2 {
|
||||
setting[k] = strings.Join(v[1:], ":")
|
||||
} else {
|
||||
setting[k] = k
|
||||
}
|
||||
}
|
||||
}
|
||||
return setting
|
||||
}
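// Example (illustrative): a field tagged `gorm:"column:user_name;not null;default:'guest'"`
// parses to {"COLUMN": "user_name", "NOT NULL": "NOT NULL", "DEFAULT": "'guest'"};
// keys are upper-cased, and value-less settings map to their own key.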
|
124 vendor/github.com/jinzhu/gorm/naming.go (generated, vendored, new file)
@ -0,0 +1,124 @@
|
||||
package gorm
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Namer is a function type which is given a string and returns a string
|
||||
type Namer func(string) string
|
||||
|
||||
// NamingStrategy represents naming strategies
|
||||
type NamingStrategy struct {
|
||||
DB Namer
|
||||
Table Namer
|
||||
Column Namer
|
||||
}
|
||||
|
||||
// TheNamingStrategy is initialized with defaultNamer for DB, Table and Column names
|
||||
var TheNamingStrategy = &NamingStrategy{
|
||||
DB: defaultNamer,
|
||||
Table: defaultNamer,
|
||||
Column: defaultNamer,
|
||||
}
|
||||
|
||||
// AddNamingStrategy sets the naming strategy
|
||||
func AddNamingStrategy(ns *NamingStrategy) {
|
||||
if ns.DB == nil {
|
||||
ns.DB = defaultNamer
|
||||
}
|
||||
if ns.Table == nil {
|
||||
ns.Table = defaultNamer
|
||||
}
|
||||
if ns.Column == nil {
|
||||
ns.Column = defaultNamer
|
||||
}
|
||||
TheNamingStrategy = ns
|
||||
}
|
||||
|
||||
// DBName alters the given name by DB
|
||||
func (ns *NamingStrategy) DBName(name string) string {
|
||||
return ns.DB(name)
|
||||
}
|
||||
|
||||
// TableName alters the given name by Table
|
||||
func (ns *NamingStrategy) TableName(name string) string {
|
||||
return ns.Table(name)
|
||||
}
|
||||
|
||||
// ColumnName alters the given name by Column
|
||||
func (ns *NamingStrategy) ColumnName(name string) string {
|
||||
return ns.Column(name)
|
||||
}
|
||||
|
||||
// ToDBName convert string to db name
|
||||
func ToDBName(name string) string {
|
||||
return TheNamingStrategy.DBName(name)
|
||||
}
|
||||
|
||||
// ToTableName convert string to table name
|
||||
func ToTableName(name string) string {
|
||||
return TheNamingStrategy.TableName(name)
|
||||
}
|
||||
|
||||
// ToColumnName converts a string to a column name
|
||||
func ToColumnName(name string) string {
|
||||
return TheNamingStrategy.ColumnName(name)
|
||||
}
|
||||
|
||||
var smap = newSafeMap()
|
||||
|
||||
func defaultNamer(name string) string {
|
||||
const (
|
||||
lower = false
|
||||
upper = true
|
||||
)
|
||||
|
||||
if v := smap.Get(name); v != "" {
|
||||
return v
|
||||
}
|
||||
|
||||
if name == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
var (
|
||||
value = commonInitialismsReplacer.Replace(name)
|
||||
buf = bytes.NewBufferString("")
|
||||
lastCase, currCase, nextCase, nextNumber bool
|
||||
)
|
||||
|
||||
for i, v := range value[:len(value)-1] {
|
||||
nextCase = bool(value[i+1] >= 'A' && value[i+1] <= 'Z')
|
||||
nextNumber = bool(value[i+1] >= '0' && value[i+1] <= '9')
|
||||
|
||||
if i > 0 {
|
||||
if currCase == upper {
|
||||
if lastCase == upper && (nextCase == upper || nextNumber == upper) {
|
||||
buf.WriteRune(v)
|
||||
} else {
|
||||
if value[i-1] != '_' && value[i+1] != '_' {
|
||||
buf.WriteRune('_')
|
||||
}
|
||||
buf.WriteRune(v)
|
||||
}
|
||||
} else {
|
||||
buf.WriteRune(v)
|
||||
if i == len(value)-2 && (nextCase == upper && nextNumber == lower) {
|
||||
buf.WriteRune('_')
|
||||
}
|
||||
}
|
||||
} else {
|
||||
currCase = upper
|
||||
buf.WriteRune(v)
|
||||
}
|
||||
lastCase = currCase
|
||||
currCase = nextCase
|
||||
}
|
||||
|
||||
buf.WriteByte(value[len(value)-1])
|
||||
|
||||
s := strings.ToLower(buf.String())
|
||||
smap.Set(name, s)
|
||||
return s
|
||||
}
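// Example conversions performed by defaultNamer (illustrative):
//
//	"CreatedAt"  => "created_at"
//	"UserID"     => "user_id"
//	"HTTPStatus" => "http_status"   (commonInitialismsReplacer first rewrites "HTTP" to "Http")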
|
1421 vendor/github.com/jinzhu/gorm/scope.go (generated, vendored, new file)
File diff suppressed because it is too large
153 vendor/github.com/jinzhu/gorm/search.go (generated, vendored, new file)
@ -0,0 +1,153 @@
|
||||
package gorm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type search struct {
|
||||
db *DB
|
||||
whereConditions []map[string]interface{}
|
||||
orConditions []map[string]interface{}
|
||||
notConditions []map[string]interface{}
|
||||
havingConditions []map[string]interface{}
|
||||
joinConditions []map[string]interface{}
|
||||
initAttrs []interface{}
|
||||
assignAttrs []interface{}
|
||||
selects map[string]interface{}
|
||||
omits []string
|
||||
orders []interface{}
|
||||
preload []searchPreload
|
||||
offset interface{}
|
||||
limit interface{}
|
||||
group string
|
||||
tableName string
|
||||
raw bool
|
||||
Unscoped bool
|
||||
ignoreOrderQuery bool
|
||||
}
|
||||
|
||||
type searchPreload struct {
|
||||
schema string
|
||||
conditions []interface{}
|
||||
}
|
||||
|
||||
func (s *search) clone() *search {
|
||||
clone := *s
|
||||
return &clone
|
||||
}
|
||||
|
||||
func (s *search) Where(query interface{}, values ...interface{}) *search {
|
||||
s.whereConditions = append(s.whereConditions, map[string]interface{}{"query": query, "args": values})
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *search) Not(query interface{}, values ...interface{}) *search {
|
||||
s.notConditions = append(s.notConditions, map[string]interface{}{"query": query, "args": values})
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *search) Or(query interface{}, values ...interface{}) *search {
|
||||
s.orConditions = append(s.orConditions, map[string]interface{}{"query": query, "args": values})
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *search) Attrs(attrs ...interface{}) *search {
|
||||
s.initAttrs = append(s.initAttrs, toSearchableMap(attrs...))
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *search) Assign(attrs ...interface{}) *search {
|
||||
s.assignAttrs = append(s.assignAttrs, toSearchableMap(attrs...))
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *search) Order(value interface{}, reorder ...bool) *search {
|
||||
if len(reorder) > 0 && reorder[0] {
|
||||
s.orders = []interface{}{}
|
||||
}
|
||||
|
||||
if value != nil && value != "" {
|
||||
s.orders = append(s.orders, value)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *search) Select(query interface{}, args ...interface{}) *search {
|
||||
s.selects = map[string]interface{}{"query": query, "args": args}
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *search) Omit(columns ...string) *search {
|
||||
s.omits = columns
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *search) Limit(limit interface{}) *search {
|
||||
s.limit = limit
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *search) Offset(offset interface{}) *search {
|
||||
s.offset = offset
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *search) Group(query string) *search {
|
||||
s.group = s.getInterfaceAsSQL(query)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *search) Having(query interface{}, values ...interface{}) *search {
|
||||
if val, ok := query.(*SqlExpr); ok {
|
||||
s.havingConditions = append(s.havingConditions, map[string]interface{}{"query": val.expr, "args": val.args})
|
||||
} else {
|
||||
s.havingConditions = append(s.havingConditions, map[string]interface{}{"query": query, "args": values})
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *search) Joins(query string, values ...interface{}) *search {
|
||||
s.joinConditions = append(s.joinConditions, map[string]interface{}{"query": query, "args": values})
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *search) Preload(schema string, values ...interface{}) *search {
|
||||
var preloads []searchPreload
|
||||
for _, preload := range s.preload {
|
||||
if preload.schema != schema {
|
||||
preloads = append(preloads, preload)
|
||||
}
|
||||
}
|
||||
preloads = append(preloads, searchPreload{schema, values})
|
||||
s.preload = preloads
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *search) Raw(b bool) *search {
|
||||
s.raw = b
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *search) unscoped() *search {
|
||||
s.Unscoped = true
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *search) Table(name string) *search {
|
||||
s.tableName = name
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *search) getInterfaceAsSQL(value interface{}) (str string) {
|
||||
switch value.(type) {
|
||||
case string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
|
||||
str = fmt.Sprintf("%v", value)
|
||||
default:
|
||||
s.db.AddError(ErrInvalidSQL)
|
||||
}
|
||||
|
||||
if str == "-1" {
|
||||
return ""
|
||||
}
|
||||
return
|
||||
}
|
5 vendor/github.com/jinzhu/gorm/test_all.sh (generated, vendored, new file)
@ -0,0 +1,5 @@
dialects=("postgres" "mysql" "mssql" "sqlite")

for dialect in "${dialects[@]}" ; do
  DEBUG=false GORM_DIALECT=${dialect} go test
done
226 vendor/github.com/jinzhu/gorm/utils.go (generated, vendored, new file)
@ -0,0 +1,226 @@
|
||||
package gorm
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// NowFunc returns current time, this function is exported in order to be able
|
||||
// to give the flexibility to the developer to customize it according to their
|
||||
// needs, e.g:
|
||||
// gorm.NowFunc = func() time.Time {
|
||||
// return time.Now().UTC()
|
||||
// }
|
||||
var NowFunc = func() time.Time {
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
// Copied from golint
|
||||
var commonInitialisms = []string{"API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "LHS", "QPS", "RAM", "RHS", "RPC", "SLA", "SMTP", "SSH", "TLS", "TTL", "UID", "UI", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XSRF", "XSS"}
|
||||
var commonInitialismsReplacer *strings.Replacer
|
||||
|
||||
var goSrcRegexp = regexp.MustCompile(`jinzhu/gorm(@.*)?/.*.go`)
|
||||
var goTestRegexp = regexp.MustCompile(`jinzhu/gorm(@.*)?/.*test.go`)
|
||||
|
||||
func init() {
|
||||
var commonInitialismsForReplacer []string
|
||||
for _, initialism := range commonInitialisms {
|
||||
commonInitialismsForReplacer = append(commonInitialismsForReplacer, initialism, strings.Title(strings.ToLower(initialism)))
|
||||
}
|
||||
commonInitialismsReplacer = strings.NewReplacer(commonInitialismsForReplacer...)
|
||||
}
|
||||
|
||||
type safeMap struct {
|
||||
m map[string]string
|
||||
l *sync.RWMutex
|
||||
}
|
||||
|
||||
func (s *safeMap) Set(key string, value string) {
|
||||
s.l.Lock()
|
||||
defer s.l.Unlock()
|
||||
s.m[key] = value
|
||||
}
|
||||
|
||||
func (s *safeMap) Get(key string) string {
|
||||
s.l.RLock()
|
||||
defer s.l.RUnlock()
|
||||
return s.m[key]
|
||||
}
|
||||
|
||||
func newSafeMap() *safeMap {
|
||||
return &safeMap{l: new(sync.RWMutex), m: make(map[string]string)}
|
||||
}
|
||||
|
||||
// SQL expression
|
||||
type SqlExpr struct {
|
||||
expr string
|
||||
args []interface{}
|
||||
}
|
||||
|
||||
// Expr generate raw SQL expression, for example:
|
||||
// DB.Model(&product).Update("price", gorm.Expr("price * ? + ?", 2, 100))
|
||||
func Expr(expression string, args ...interface{}) *SqlExpr {
|
||||
return &SqlExpr{expr: expression, args: args}
|
||||
}
|
||||
|
||||
func indirect(reflectValue reflect.Value) reflect.Value {
|
||||
for reflectValue.Kind() == reflect.Ptr {
|
||||
reflectValue = reflectValue.Elem()
|
||||
}
|
||||
return reflectValue
|
||||
}
|
||||
|
||||
func toQueryMarks(primaryValues [][]interface{}) string {
|
||||
var results []string
|
||||
|
||||
for _, primaryValue := range primaryValues {
|
||||
var marks []string
|
||||
for range primaryValue {
|
||||
marks = append(marks, "?")
|
||||
}
|
||||
|
||||
if len(marks) > 1 {
|
||||
results = append(results, fmt.Sprintf("(%v)", strings.Join(marks, ",")))
|
||||
} else {
|
||||
results = append(results, strings.Join(marks, ""))
|
||||
}
|
||||
}
|
||||
return strings.Join(results, ",")
|
||||
}
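// Example (illustrative): toQueryMarks([][]interface{}{{1, 2}, {3, 4}}) returns
// "(?,?),(?,?)", while toQueryMarks([][]interface{}{{1}, {2}}) returns "?,?".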
|
||||
|
||||
func toQueryCondition(scope *Scope, columns []string) string {
|
||||
var newColumns []string
|
||||
for _, column := range columns {
|
||||
newColumns = append(newColumns, scope.Quote(column))
|
||||
}
|
||||
|
||||
if len(columns) > 1 {
|
||||
return fmt.Sprintf("(%v)", strings.Join(newColumns, ","))
|
||||
}
|
||||
return strings.Join(newColumns, ",")
|
||||
}
|
||||
|
||||
func toQueryValues(values [][]interface{}) (results []interface{}) {
|
||||
for _, value := range values {
|
||||
for _, v := range value {
|
||||
results = append(results, v)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func fileWithLineNum() string {
|
||||
for i := 2; i < 15; i++ {
|
||||
_, file, line, ok := runtime.Caller(i)
|
||||
if ok && (!goSrcRegexp.MatchString(file) || goTestRegexp.MatchString(file)) {
|
||||
return fmt.Sprintf("%v:%v", file, line)
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func isBlank(value reflect.Value) bool {
|
||||
switch value.Kind() {
|
||||
case reflect.String:
|
||||
return value.Len() == 0
|
||||
case reflect.Bool:
|
||||
return !value.Bool()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return value.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return value.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return value.Float() == 0
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
return value.IsNil()
|
||||
}
|
||||
|
||||
return reflect.DeepEqual(value.Interface(), reflect.Zero(value.Type()).Interface())
|
||||
}
|
||||
|
||||
func toSearchableMap(attrs ...interface{}) (result interface{}) {
|
||||
if len(attrs) > 1 {
|
||||
if str, ok := attrs[0].(string); ok {
|
||||
result = map[string]interface{}{str: attrs[1]}
|
||||
}
|
||||
} else if len(attrs) == 1 {
|
||||
if attr, ok := attrs[0].(map[string]interface{}); ok {
|
||||
result = attr
|
||||
}
|
||||
|
||||
if attr, ok := attrs[0].(interface{}); ok {
|
||||
result = attr
|
||||
}
|
||||
}
|
||||
return
|
||||
}
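// Example (illustrative): toSearchableMap("name", "jinzhu") returns
// map[string]interface{}{"name": "jinzhu"}; a single argument, e.g. a struct
// or a map, is passed through unchanged.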
|
||||
|
||||
func equalAsString(a interface{}, b interface{}) bool {
|
||||
return toString(a) == toString(b)
|
||||
}
|
||||
|
||||
func toString(str interface{}) string {
|
||||
if values, ok := str.([]interface{}); ok {
|
||||
var results []string
|
||||
for _, value := range values {
|
||||
results = append(results, toString(value))
|
||||
}
|
||||
return strings.Join(results, "_")
|
||||
} else if bytes, ok := str.([]byte); ok {
|
||||
return string(bytes)
|
||||
} else if reflectValue := reflect.Indirect(reflect.ValueOf(str)); reflectValue.IsValid() {
|
||||
return fmt.Sprintf("%v", reflectValue.Interface())
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func makeSlice(elemType reflect.Type) interface{} {
|
||||
if elemType.Kind() == reflect.Slice {
|
||||
elemType = elemType.Elem()
|
||||
}
|
||||
sliceType := reflect.SliceOf(elemType)
|
||||
slice := reflect.New(sliceType)
|
||||
slice.Elem().Set(reflect.MakeSlice(sliceType, 0, 0))
|
||||
return slice.Interface()
|
||||
}
|
||||
|
||||
func strInSlice(a string, list []string) bool {
|
||||
for _, b := range list {
|
||||
if b == a {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// getValueFromFields returns the given fields' values
|
||||
func getValueFromFields(value reflect.Value, fieldNames []string) (results []interface{}) {
|
||||
// If value is a nil pointer, Indirect returns a zero Value!
|
||||
// Therefore we need to check for a zero value,
|
||||
// as FieldByName could panic
|
||||
if indirectValue := reflect.Indirect(value); indirectValue.IsValid() {
|
||||
for _, fieldName := range fieldNames {
|
||||
if fieldValue := reflect.Indirect(indirectValue.FieldByName(fieldName)); fieldValue.IsValid() {
|
||||
result := fieldValue.Interface()
|
||||
if r, ok := result.(driver.Valuer); ok {
|
||||
result, _ = r.Value()
|
||||
}
|
||||
results = append(results, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func addExtraSpaceIfExist(str string) string {
|
||||
if str != "" {
|
||||
return " " + str
|
||||
}
|
||||
return ""
|
||||
}
|
154 vendor/github.com/jinzhu/gorm/wercker.yml (generated, vendored, new file)
@ -0,0 +1,154 @@
|
||||
# use the default golang container from Docker Hub
|
||||
box: golang
|
||||
|
||||
services:
|
||||
- name: mariadb
|
||||
id: mariadb:latest
|
||||
env:
|
||||
MYSQL_DATABASE: gorm
|
||||
MYSQL_USER: gorm
|
||||
MYSQL_PASSWORD: gorm
|
||||
MYSQL_RANDOM_ROOT_PASSWORD: "yes"
|
||||
- name: mysql
|
||||
id: mysql:latest
|
||||
env:
|
||||
MYSQL_DATABASE: gorm
|
||||
MYSQL_USER: gorm
|
||||
MYSQL_PASSWORD: gorm
|
||||
MYSQL_RANDOM_ROOT_PASSWORD: "yes"
|
||||
- name: mysql57
|
||||
id: mysql:5.7
|
||||
env:
|
||||
MYSQL_DATABASE: gorm
|
||||
MYSQL_USER: gorm
|
||||
MYSQL_PASSWORD: gorm
|
||||
MYSQL_RANDOM_ROOT_PASSWORD: "yes"
|
||||
- name: mysql56
|
||||
id: mysql:5.6
|
||||
env:
|
||||
MYSQL_DATABASE: gorm
|
||||
MYSQL_USER: gorm
|
||||
MYSQL_PASSWORD: gorm
|
||||
MYSQL_RANDOM_ROOT_PASSWORD: "yes"
|
||||
- name: postgres
|
||||
id: postgres:latest
|
||||
env:
|
||||
POSTGRES_USER: gorm
|
||||
POSTGRES_PASSWORD: gorm
|
||||
POSTGRES_DB: gorm
|
||||
- name: postgres96
|
||||
id: postgres:9.6
|
||||
env:
|
||||
POSTGRES_USER: gorm
|
||||
POSTGRES_PASSWORD: gorm
|
||||
POSTGRES_DB: gorm
|
||||
- name: postgres95
|
||||
id: postgres:9.5
|
||||
env:
|
||||
POSTGRES_USER: gorm
|
||||
POSTGRES_PASSWORD: gorm
|
||||
POSTGRES_DB: gorm
|
||||
- name: postgres94
|
||||
id: postgres:9.4
|
||||
env:
|
||||
POSTGRES_USER: gorm
|
||||
POSTGRES_PASSWORD: gorm
|
||||
POSTGRES_DB: gorm
|
||||
- name: postgres93
|
||||
id: postgres:9.3
|
||||
env:
|
||||
POSTGRES_USER: gorm
|
||||
POSTGRES_PASSWORD: gorm
|
||||
POSTGRES_DB: gorm
|
||||
- name: mssql
|
||||
id: mcmoe/mssqldocker:latest
|
||||
env:
|
||||
ACCEPT_EULA: Y
|
||||
SA_PASSWORD: LoremIpsum86
|
||||
MSSQL_DB: gorm
|
||||
MSSQL_USER: gorm
|
||||
MSSQL_PASSWORD: LoremIpsum86
|
||||
|
||||
# The steps that will be executed in the build pipeline
|
||||
build:
|
||||
# The steps that will be executed on build
|
||||
steps:
|
||||
# Sets the go workspace and places you package
|
||||
# at the right place in the workspace tree
|
||||
- setup-go-workspace
|
||||
|
||||
# Gets the dependencies
|
||||
- script:
|
||||
name: go get
|
||||
code: |
|
||||
cd $WERCKER_SOURCE_DIR
|
||||
go version
|
||||
go get -t -v ./...
|
||||
|
||||
# Build the project
|
||||
- script:
|
||||
name: go build
|
||||
code: |
|
||||
go build ./...
|
||||
|
||||
# Test the project
|
||||
- script:
|
||||
name: test sqlite
|
||||
code: |
|
||||
go test -race -v ./...
|
||||
|
||||
- script:
|
||||
name: test mariadb
|
||||
code: |
|
||||
GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mariadb:3306)/gorm?charset=utf8&parseTime=True" go test -race ./...
|
||||
|
||||
- script:
|
||||
name: test mysql
|
||||
code: |
|
||||
GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mysql:3306)/gorm?charset=utf8&parseTime=True" go test -race ./...
|
||||
|
||||
- script:
|
||||
name: test mysql5.7
|
||||
code: |
|
||||
GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mysql57:3306)/gorm?charset=utf8&parseTime=True" go test -race ./...
|
||||
|
||||
- script:
|
||||
name: test mysql5.6
|
||||
code: |
|
||||
GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mysql56:3306)/gorm?charset=utf8&parseTime=True" go test -race ./...
|
||||
|
||||
- script:
|
||||
name: test postgres
|
||||
code: |
|
||||
GORM_DIALECT=postgres GORM_DSN="host=postgres user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test -race ./...
|
||||
|
||||
- script:
|
||||
name: test postgres96
|
||||
code: |
|
||||
GORM_DIALECT=postgres GORM_DSN="host=postgres96 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test -race ./...
|
||||
|
||||
- script:
|
||||
name: test postgres95
|
||||
code: |
|
||||
GORM_DIALECT=postgres GORM_DSN="host=postgres95 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test -race ./...
|
||||
|
||||
- script:
|
||||
name: test postgres94
|
||||
code: |
|
||||
GORM_DIALECT=postgres GORM_DSN="host=postgres94 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test -race ./...
|
||||
|
||||
- script:
|
||||
name: test postgres93
|
||||
code: |
|
||||
GORM_DIALECT=postgres GORM_DSN="host=postgres93 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test -race ./...
|
||||
|
||||
- script:
|
||||
name: test mssql
|
||||
code: |
|
||||
GORM_DIALECT=mssql GORM_DSN="sqlserver://gorm:LoremIpsum86@mssql:1433?database=gorm" go test -race ./...
|
||||
|
||||
- script:
|
||||
name: codecov
|
||||
code: |
|
||||
go test -race -coverprofile=coverage.txt -covermode=atomic ./...
|
||||
bash <(curl -s https://codecov.io/bash)
|
21 vendor/github.com/jinzhu/inflection/LICENSE (generated, vendored, new file)
@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2015 - Jinzhu

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
55 vendor/github.com/jinzhu/inflection/README.md (generated, vendored, new file)
@ -0,0 +1,55 @@
|
||||
# Inflection
|
||||
|
||||
Inflection pluralizes and singularizes English nouns
|
||||
|
||||
[![wercker status](https://app.wercker.com/status/f8c7432b097d1f4ce636879670be0930/s/master "wercker status")](https://app.wercker.com/project/byKey/f8c7432b097d1f4ce636879670be0930)
|
||||
|
||||
## Basic Usage
|
||||
|
||||
```go
|
||||
inflection.Plural("person") => "people"
|
||||
inflection.Plural("Person") => "People"
|
||||
inflection.Plural("PERSON") => "PEOPLE"
|
||||
inflection.Plural("bus") => "buses"
|
||||
inflection.Plural("BUS") => "BUSES"
|
||||
inflection.Plural("Bus") => "Buses"
|
||||
|
||||
inflection.Singular("people") => "person"
|
||||
inflection.Singular("People") => "Person"
|
||||
inflection.Singular("PEOPLE") => "PERSON"
|
||||
inflection.Singular("buses") => "bus"
|
||||
inflection.Singular("BUSES") => "BUS"
|
||||
inflection.Singular("Buses") => "Bus"
|
||||
|
||||
inflection.Plural("FancyPerson") => "FancyPeople"
|
||||
inflection.Singular("FancyPeople") => "FancyPerson"
|
||||
```
|
||||
|
||||
## Register Rules
|
||||
|
||||
Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb)
|
||||
|
||||
If you want to register more rules, follow:
|
||||
|
||||
```
|
||||
inflection.AddUncountable("fish")
|
||||
inflection.AddIrregular("person", "people")
|
||||
inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses"
|
||||
inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS"
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
You can help to make the project better, check out [http://gorm.io/contribute.html](http://gorm.io/contribute.html) for things you can do.
|
||||
|
||||
## Author
|
||||
|
||||
**jinzhu**
|
||||
|
||||
* <http://github.com/jinzhu>
|
||||
* <wosmvp@gmail.com>
|
||||
* <http://twitter.com/zhangjinzhu>
|
||||
|
||||
## License
|
||||
|
||||
Released under the [MIT License](http://www.opensource.org/licenses/MIT).
|
1 vendor/github.com/jinzhu/inflection/go.mod (generated, vendored, new file)
@ -0,0 +1 @@
module github.com/jinzhu/inflection
273 vendor/github.com/jinzhu/inflection/inflections.go (generated, vendored, new file)
@ -0,0 +1,273 @@
|
||||
/*
|
||||
Package inflection pluralizes and singularizes English nouns.
|
||||
|
||||
inflection.Plural("person") => "people"
|
||||
inflection.Plural("Person") => "People"
|
||||
inflection.Plural("PERSON") => "PEOPLE"
|
||||
|
||||
inflection.Singular("people") => "person"
|
||||
inflection.Singular("People") => "Person"
|
||||
inflection.Singular("PEOPLE") => "PERSON"
|
||||
|
||||
inflection.Plural("FancyPerson") => "FancydPeople"
|
||||
inflection.Singular("FancyPeople") => "FancydPerson"
|
||||
|
||||
Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb)
|
||||
|
||||
If you want to register more rules, follow:
|
||||
|
||||
inflection.AddUncountable("fish")
|
||||
inflection.AddIrregular("person", "people")
|
||||
inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses"
|
||||
inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS"
|
||||
*/
|
||||
package inflection
|
||||
|
||||

import (
	"regexp"
	"strings"
)

type inflection struct {
	regexp  *regexp.Regexp
	replace string
}

// Regular is a regexp find replace inflection
type Regular struct {
	find    string
	replace string
}

// Irregular is a hard replace inflection,
// containing both singular and plural forms
type Irregular struct {
	singular string
	plural   string
}

// RegularSlice is a slice of Regular inflections
type RegularSlice []Regular

// IrregularSlice is a slice of Irregular inflections
type IrregularSlice []Irregular

var pluralInflections = RegularSlice{
	{"([a-z])$", "${1}s"},
	{"s$", "s"},
	{"^(ax|test)is$", "${1}es"},
	{"(octop|vir)us$", "${1}i"},
	{"(octop|vir)i$", "${1}i"},
	{"(alias|status)$", "${1}es"},
	{"(bu)s$", "${1}ses"},
	{"(buffal|tomat)o$", "${1}oes"},
	{"([ti])um$", "${1}a"},
	{"([ti])a$", "${1}a"},
	{"sis$", "ses"},
	{"(?:([^f])fe|([lr])f)$", "${1}${2}ves"},
	{"(hive)$", "${1}s"},
	{"([^aeiouy]|qu)y$", "${1}ies"},
	{"(x|ch|ss|sh)$", "${1}es"},
	{"(matr|vert|ind)(?:ix|ex)$", "${1}ices"},
	{"^(m|l)ouse$", "${1}ice"},
	{"^(m|l)ice$", "${1}ice"},
	{"^(ox)$", "${1}en"},
	{"^(oxen)$", "${1}"},
	{"(quiz)$", "${1}zes"},
}

var singularInflections = RegularSlice{
	{"s$", ""},
	{"(ss)$", "${1}"},
	{"(n)ews$", "${1}ews"},
	{"([ti])a$", "${1}um"},
	{"((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)(sis|ses)$", "${1}sis"},
	{"(^analy)(sis|ses)$", "${1}sis"},
	{"([^f])ves$", "${1}fe"},
	{"(hive)s$", "${1}"},
	{"(tive)s$", "${1}"},
	{"([lr])ves$", "${1}f"},
	{"([^aeiouy]|qu)ies$", "${1}y"},
	{"(s)eries$", "${1}eries"},
	{"(m)ovies$", "${1}ovie"},
	{"(c)ookies$", "${1}ookie"},
	{"(x|ch|ss|sh)es$", "${1}"},
	{"^(m|l)ice$", "${1}ouse"},
	{"(bus)(es)?$", "${1}"},
	{"(o)es$", "${1}"},
	{"(shoe)s$", "${1}"},
	{"(cris|test)(is|es)$", "${1}is"},
	{"^(a)x[ie]s$", "${1}xis"},
	{"(octop|vir)(us|i)$", "${1}us"},
	{"(alias|status)(es)?$", "${1}"},
	{"^(ox)en", "${1}"},
	{"(vert|ind)ices$", "${1}ex"},
	{"(matr)ices$", "${1}ix"},
	{"(quiz)zes$", "${1}"},
	{"(database)s$", "${1}"},
}

var irregularInflections = IrregularSlice{
	{"person", "people"},
	{"man", "men"},
	{"child", "children"},
	{"sex", "sexes"},
	{"move", "moves"},
	{"mombie", "mombies"},
}

var uncountableInflections = []string{"equipment", "information", "rice", "money", "species", "series", "fish", "sheep", "jeans", "police"}

var compiledPluralMaps []inflection
var compiledSingularMaps []inflection

func compile() {
	compiledPluralMaps = []inflection{}
	compiledSingularMaps = []inflection{}
	for _, uncountable := range uncountableInflections {
		inf := inflection{
			regexp:  regexp.MustCompile("^(?i)(" + uncountable + ")$"),
			replace: "${1}",
		}
		compiledPluralMaps = append(compiledPluralMaps, inf)
		compiledSingularMaps = append(compiledSingularMaps, inf)
	}

	for _, value := range irregularInflections {
		infs := []inflection{
			inflection{regexp: regexp.MustCompile(strings.ToUpper(value.singular) + "$"), replace: strings.ToUpper(value.plural)},
			inflection{regexp: regexp.MustCompile(strings.Title(value.singular) + "$"), replace: strings.Title(value.plural)},
			inflection{regexp: regexp.MustCompile(value.singular + "$"), replace: value.plural},
		}
		compiledPluralMaps = append(compiledPluralMaps, infs...)
	}

	for _, value := range irregularInflections {
		infs := []inflection{
			inflection{regexp: regexp.MustCompile(strings.ToUpper(value.plural) + "$"), replace: strings.ToUpper(value.singular)},
			inflection{regexp: regexp.MustCompile(strings.Title(value.plural) + "$"), replace: strings.Title(value.singular)},
			inflection{regexp: regexp.MustCompile(value.plural + "$"), replace: value.singular},
		}
		compiledSingularMaps = append(compiledSingularMaps, infs...)
	}

	for i := len(pluralInflections) - 1; i >= 0; i-- {
		value := pluralInflections[i]
		infs := []inflection{
			inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)},
			inflection{regexp: regexp.MustCompile(value.find), replace: value.replace},
			inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace},
		}
		compiledPluralMaps = append(compiledPluralMaps, infs...)
	}

	for i := len(singularInflections) - 1; i >= 0; i-- {
		value := singularInflections[i]
		infs := []inflection{
			inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)},
			inflection{regexp: regexp.MustCompile(value.find), replace: value.replace},
			inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace},
		}
		compiledSingularMaps = append(compiledSingularMaps, infs...)
	}
}

func init() {
	compile()
}

// AddPlural adds a plural inflection
func AddPlural(find, replace string) {
	pluralInflections = append(pluralInflections, Regular{find, replace})
	compile()
}

// AddSingular adds a singular inflection
func AddSingular(find, replace string) {
	singularInflections = append(singularInflections, Regular{find, replace})
	compile()
}

// AddIrregular adds an irregular inflection
func AddIrregular(singular, plural string) {
	irregularInflections = append(irregularInflections, Irregular{singular, plural})
	compile()
}

// AddUncountable adds an uncountable inflection
func AddUncountable(values ...string) {
	uncountableInflections = append(uncountableInflections, values...)
	compile()
}

// GetPlural retrieves the plural inflection values
func GetPlural() RegularSlice {
	plurals := make(RegularSlice, len(pluralInflections))
	copy(plurals, pluralInflections)
	return plurals
}

// GetSingular retrieves the singular inflection values
func GetSingular() RegularSlice {
	singulars := make(RegularSlice, len(singularInflections))
	copy(singulars, singularInflections)
	return singulars
}

// GetIrregular retrieves the irregular inflection values
func GetIrregular() IrregularSlice {
	irregular := make(IrregularSlice, len(irregularInflections))
	copy(irregular, irregularInflections)
	return irregular
}

// GetUncountable retrieves the uncountable inflection values
func GetUncountable() []string {
	uncountables := make([]string, len(uncountableInflections))
	copy(uncountables, uncountableInflections)
	return uncountables
}

// SetPlural sets the plural inflections slice
func SetPlural(inflections RegularSlice) {
	pluralInflections = inflections
	compile()
}

// SetSingular sets the singular inflections slice
func SetSingular(inflections RegularSlice) {
	singularInflections = inflections
	compile()
}

// SetIrregular sets the irregular inflections slice
func SetIrregular(inflections IrregularSlice) {
	irregularInflections = inflections
	compile()
}

// SetUncountable sets the uncountable inflections slice
func SetUncountable(inflections []string) {
	uncountableInflections = inflections
	compile()
}

// Plural converts a word to its plural form
func Plural(str string) string {
	for _, inflection := range compiledPluralMaps {
		if inflection.regexp.MatchString(str) {
			return inflection.regexp.ReplaceAllString(str, inflection.replace)
		}
	}
	return str
}

// Singular converts a word to its singular form
func Singular(str string) string {
	for _, inflection := range compiledSingularMaps {
		if inflection.regexp.MatchString(str) {
			return inflection.regexp.ReplaceAllString(str, inflection.replace)
		}
	}
	return str
}
23
vendor/github.com/jinzhu/inflection/wercker.yml
generated
vendored
Normal file
@ -0,0 +1,23 @@
box: golang

build:
  steps:
    - setup-go-workspace

    # Gets the dependencies
    - script:
        name: go get
        code: |
          go get

    # Build the project
    - script:
        name: go build
        code: |
          go build ./...

    # Test the project
    - script:
        name: go test
        code: |
          go test ./...
284
vendor/github.com/lib/pq/copy_BACKUP_6648.go
generated
vendored
Normal file
@ -0,0 +1,284 @@
|
||||
package pq
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
errCopyInClosed = errors.New("pq: copyin statement has already been closed")
|
||||
errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY")
|
||||
errCopyToNotSupported = errors.New("pq: COPY TO is not supported")
|
||||
errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction")
|
||||
errCopyInProgress = errors.New("pq: COPY in progress")
|
||||
)
|
||||
|
||||
// CopyIn creates a COPY FROM statement which can be prepared with
|
||||
// Tx.Prepare(). The target table should be visible in search_path.
|
||||
func CopyIn(table string, columns ...string) string {
|
||||
stmt := "COPY " + QuoteIdentifier(table) + " ("
|
||||
for i, col := range columns {
|
||||
if i != 0 {
|
||||
stmt += ", "
|
||||
}
|
||||
stmt += QuoteIdentifier(col)
|
||||
}
|
||||
stmt += ") FROM STDIN"
|
||||
return stmt
|
||||
}
|
||||
|
||||
// CopyInSchema creates a COPY FROM statement which can be prepared with
|
||||
// Tx.Prepare().
|
||||
func CopyInSchema(schema, table string, columns ...string) string {
|
||||
stmt := "COPY " + QuoteIdentifier(schema) + "." + QuoteIdentifier(table) + " ("
|
||||
for i, col := range columns {
|
||||
if i != 0 {
|
||||
stmt += ", "
|
||||
}
|
||||
stmt += QuoteIdentifier(col)
|
||||
}
|
||||
stmt += ") FROM STDIN"
|
||||
return stmt
|
||||
}
|
||||
|
||||
type copyin struct {
|
||||
cn *conn
|
||||
buffer []byte
|
||||
rowData chan []byte
|
||||
done chan bool
|
||||
|
||||
closed bool
|
||||
|
||||
sync.Mutex // guards err
|
||||
err error
|
||||
}
|
||||
|
||||
const ciBufferSize = 64 * 1024
|
||||
|
||||
// flush buffer before the buffer is filled up and needs reallocation
|
||||
const ciBufferFlushSize = 63 * 1024
|
||||
|
||||
func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) {
|
||||
if !cn.isInTransaction() {
|
||||
return nil, errCopyNotSupportedOutsideTxn
|
||||
}
|
||||
|
||||
ci := ©in{
|
||||
cn: cn,
|
||||
buffer: make([]byte, 0, ciBufferSize),
|
||||
rowData: make(chan []byte),
|
||||
done: make(chan bool, 1),
|
||||
}
|
||||
// add CopyData identifier + 4 bytes for message length
|
||||
ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0)
|
||||
|
||||
b := cn.writeBuf('Q')
|
||||
b.string(q)
|
||||
cn.send(b)
|
||||
|
||||
awaitCopyInResponse:
|
||||
for {
|
||||
t, r := cn.recv1()
|
||||
switch t {
|
||||
case 'G':
|
||||
if r.byte() != 0 {
|
||||
err = errBinaryCopyNotSupported
|
||||
break awaitCopyInResponse
|
||||
}
|
||||
go ci.resploop()
|
||||
return ci, nil
|
||||
case 'H':
|
||||
err = errCopyToNotSupported
|
||||
break awaitCopyInResponse
|
||||
case 'E':
|
||||
err = parseError(r)
|
||||
case 'Z':
|
||||
if err == nil {
|
||||
ci.setBad()
|
||||
errorf("unexpected ReadyForQuery in response to COPY")
|
||||
}
|
||||
cn.processReadyForQuery(r)
|
||||
return nil, err
|
||||
default:
|
||||
ci.setBad()
|
||||
errorf("unknown response for copy query: %q", t)
|
||||
}
|
||||
}
|
||||
|
||||
// something went wrong, abort COPY before we return
|
||||
b = cn.writeBuf('f')
|
||||
b.string(err.Error())
|
||||
cn.send(b)
|
||||
|
||||
for {
|
||||
t, r := cn.recv1()
|
||||
switch t {
|
||||
case 'c', 'C', 'E':
|
||||
case 'Z':
|
||||
// correctly aborted, we're done
|
||||
cn.processReadyForQuery(r)
|
||||
return nil, err
|
||||
default:
|
||||
ci.setBad()
|
||||
errorf("unknown response for CopyFail: %q", t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ci *copyin) flush(buf []byte) {
|
||||
// set message length (without message identifier)
|
||||
binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1))
|
||||
|
||||
_, err := ci.cn.c.Write(buf)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (ci *copyin) resploop() {
|
||||
for {
|
||||
var r readBuf
|
||||
t, err := ci.cn.recvMessage(&r)
|
||||
if err != nil {
|
||||
ci.setBad()
|
||||
ci.setError(err)
|
||||
ci.done <- true
|
||||
return
|
||||
}
|
||||
switch t {
|
||||
case 'C':
|
||||
// complete
|
||||
case 'N':
|
||||
if n := ci.cn.noticeHandler; n != nil {
|
||||
n(parseError(&r))
|
||||
}
|
||||
case 'Z':
|
||||
ci.cn.processReadyForQuery(&r)
|
||||
ci.done <- true
|
||||
return
|
||||
case 'E':
|
||||
err := parseError(&r)
|
||||
ci.setError(err)
|
||||
default:
|
||||
ci.setBad()
|
||||
ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t))
|
||||
ci.done <- true
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ci *copyin) setBad() {
|
||||
ci.Lock()
|
||||
ci.cn.bad = true
|
||||
ci.Unlock()
|
||||
}
|
||||
|
||||
func (ci *copyin) isBad() bool {
|
||||
ci.Lock()
|
||||
b := ci.cn.bad
|
||||
ci.Unlock()
|
||||
return b
|
||||
}
|
||||
|
||||
func (ci *copyin) isErrorSet() bool {
|
||||
ci.Lock()
|
||||
isSet := (ci.err != nil)
|
||||
ci.Unlock()
|
||||
return isSet
|
||||
}
|
||||
|
||||
// setError() sets ci.err if one has not been set already. Caller must not be
|
||||
// holding ci.Mutex.
|
||||
func (ci *copyin) setError(err error) {
|
||||
ci.Lock()
|
||||
if ci.err == nil {
|
||||
ci.err = err
|
||||
}
|
||||
ci.Unlock()
|
||||
}
|
||||
|
||||
func (ci *copyin) NumInput() int {
|
||||
return -1
|
||||
}
|
||||
|
||||
func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
// Exec inserts values into the COPY stream. The insert is asynchronous
|
||||
// and Exec can return errors from previous Exec calls to the same
|
||||
// COPY stmt.
|
||||
//
|
||||
// You need to call Exec(nil) to sync the COPY stream and to get any
|
||||
// errors from pending data, since Stmt.Close() doesn't return errors
|
||||
// to the user.
|
||||
func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {
|
||||
if ci.closed {
|
||||
return nil, errCopyInClosed
|
||||
}
|
||||
|
||||
if ci.isBad() {
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
defer ci.cn.errRecover(&err)
|
||||
|
||||
if ci.isErrorSet() {
|
||||
return nil, ci.err
|
||||
}
|
||||
|
||||
if len(v) == 0 {
|
||||
return driver.RowsAffected(0), ci.Close()
|
||||
}
|
||||
|
||||
numValues := len(v)
|
||||
for i, value := range v {
|
||||
ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value)
|
||||
if i < numValues-1 {
|
||||
ci.buffer = append(ci.buffer, '\t')
|
||||
}
|
||||
}
|
||||
|
||||
ci.buffer = append(ci.buffer, '\n')
|
||||
|
||||
if len(ci.buffer) > ciBufferFlushSize {
|
||||
ci.flush(ci.buffer)
|
||||
// reset buffer, keep bytes for message identifier and length
|
||||
ci.buffer = ci.buffer[:5]
|
||||
}
|
||||
|
||||
return driver.RowsAffected(0), nil
|
||||
}
|
||||
|
||||
func (ci *copyin) Close() (err error) {
|
||||
if ci.closed { // Don't do anything, we're already closed
|
||||
return nil
|
||||
}
|
||||
ci.closed = true
|
||||
|
||||
if ci.isBad() {
|
||||
return driver.ErrBadConn
|
||||
}
|
||||
defer ci.cn.errRecover(&err)
|
||||
|
||||
if len(ci.buffer) > 0 {
|
||||
ci.flush(ci.buffer)
|
||||
}
|
||||
// Avoid touching the scratch buffer as resploop could be using it.
|
||||
err = ci.cn.sendSimpleMessage('c')
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
<-ci.done
|
||||
ci.cn.inCopy = false
|
||||
|
||||
if ci.isErrorSet() {
|
||||
err = ci.err
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
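The comments on CopyIn and Exec above describe the intended calling pattern: prepare the statement inside a transaction, Exec each row, then Exec with no arguments to flush the COPY stream before closing. A minimal sketch of that pattern; the `users` table, its columns, and the connection string are assumptions for illustration:

```
package main

import (
	"database/sql"
	"log"

	"github.com/lib/pq"
)

func copyUsers(db *sql.DB, rows [][2]interface{}) error {
	txn, err := db.Begin()
	if err != nil {
		return err
	}
	// COPY "users" ("name", "age") FROM STDIN
	stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age"))
	if err != nil {
		txn.Rollback()
		return err
	}
	for _, r := range rows {
		if _, err := stmt.Exec(r[0], r[1]); err != nil {
			txn.Rollback()
			return err
		}
	}
	// Exec with no arguments syncs the COPY stream and surfaces any buffered errors.
	if _, err := stmt.Exec(); err != nil {
		txn.Rollback()
		return err
	}
	if err := stmt.Close(); err != nil {
		txn.Rollback()
		return err
	}
	return txn.Commit()
}

func main() {
	db, err := sql.Open("postgres", "postgres://localhost/example?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := copyUsers(db, [][2]interface{}{{"ann", 31}, {"bob", 27}}); err != nil {
		log.Fatal(err)
	}
}
```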
284
vendor/github.com/lib/pq/copy_BACKUP_6722.go
generated
vendored
Normal file
@ -0,0 +1,284 @@
|
||||
package pq
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
errCopyInClosed = errors.New("pq: copyin statement has already been closed")
|
||||
errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY")
|
||||
errCopyToNotSupported = errors.New("pq: COPY TO is not supported")
|
||||
errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction")
|
||||
errCopyInProgress = errors.New("pq: COPY in progress")
|
||||
)
|
||||
|
||||
// CopyIn creates a COPY FROM statement which can be prepared with
|
||||
// Tx.Prepare(). The target table should be visible in search_path.
|
||||
func CopyIn(table string, columns ...string) string {
|
||||
stmt := "COPY " + QuoteIdentifier(table) + " ("
|
||||
for i, col := range columns {
|
||||
if i != 0 {
|
||||
stmt += ", "
|
||||
}
|
||||
stmt += QuoteIdentifier(col)
|
||||
}
|
||||
stmt += ") FROM STDIN"
|
||||
return stmt
|
||||
}
|
||||
|
||||
// CopyInSchema creates a COPY FROM statement which can be prepared with
|
||||
// Tx.Prepare().
|
||||
func CopyInSchema(schema, table string, columns ...string) string {
|
||||
stmt := "COPY " + QuoteIdentifier(schema) + "." + QuoteIdentifier(table) + " ("
|
||||
for i, col := range columns {
|
||||
if i != 0 {
|
||||
stmt += ", "
|
||||
}
|
||||
stmt += QuoteIdentifier(col)
|
||||
}
|
||||
stmt += ") FROM STDIN"
|
||||
return stmt
|
||||
}
|
||||
|
||||
type copyin struct {
|
||||
cn *conn
|
||||
buffer []byte
|
||||
rowData chan []byte
|
||||
done chan bool
|
||||
|
||||
closed bool
|
||||
|
||||
sync.Mutex // guards err
|
||||
err error
|
||||
}
|
||||
|
||||
const ciBufferSize = 64 * 1024
|
||||
|
||||
// flush buffer before the buffer is filled up and needs reallocation
|
||||
const ciBufferFlushSize = 63 * 1024
|
||||
|
||||
func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) {
|
||||
if !cn.isInTransaction() {
|
||||
return nil, errCopyNotSupportedOutsideTxn
|
||||
}
|
||||
|
||||
ci := ©in{
|
||||
cn: cn,
|
||||
buffer: make([]byte, 0, ciBufferSize),
|
||||
rowData: make(chan []byte),
|
||||
done: make(chan bool, 1),
|
||||
}
|
||||
// add CopyData identifier + 4 bytes for message length
|
||||
ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0)
|
||||
|
||||
b := cn.writeBuf('Q')
|
||||
b.string(q)
|
||||
cn.send(b)
|
||||
|
||||
awaitCopyInResponse:
|
||||
for {
|
||||
t, r := cn.recv1()
|
||||
switch t {
|
||||
case 'G':
|
||||
if r.byte() != 0 {
|
||||
err = errBinaryCopyNotSupported
|
||||
break awaitCopyInResponse
|
||||
}
|
||||
go ci.resploop()
|
||||
return ci, nil
|
||||
case 'H':
|
||||
err = errCopyToNotSupported
|
||||
break awaitCopyInResponse
|
||||
case 'E':
|
||||
err = parseError(r)
|
||||
case 'Z':
|
||||
if err == nil {
|
||||
ci.setBad()
|
||||
errorf("unexpected ReadyForQuery in response to COPY")
|
||||
}
|
||||
cn.processReadyForQuery(r)
|
||||
return nil, err
|
||||
default:
|
||||
ci.setBad()
|
||||
errorf("unknown response for copy query: %q", t)
|
||||
}
|
||||
}
|
||||
|
||||
// something went wrong, abort COPY before we return
|
||||
b = cn.writeBuf('f')
|
||||
b.string(err.Error())
|
||||
cn.send(b)
|
||||
|
||||
for {
|
||||
t, r := cn.recv1()
|
||||
switch t {
|
||||
case 'c', 'C', 'E':
|
||||
case 'Z':
|
||||
// correctly aborted, we're done
|
||||
cn.processReadyForQuery(r)
|
||||
return nil, err
|
||||
default:
|
||||
ci.setBad()
|
||||
errorf("unknown response for CopyFail: %q", t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ci *copyin) flush(buf []byte) {
|
||||
// set message length (without message identifier)
|
||||
binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1))
|
||||
|
||||
_, err := ci.cn.c.Write(buf)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (ci *copyin) resploop() {
|
||||
for {
|
||||
var r readBuf
|
||||
t, err := ci.cn.recvMessage(&r)
|
||||
if err != nil {
|
||||
ci.setBad()
|
||||
ci.setError(err)
|
||||
ci.done <- true
|
||||
return
|
||||
}
|
||||
switch t {
|
||||
case 'C':
|
||||
// complete
|
||||
case 'N':
|
||||
if n := ci.cn.noticeHandler; n != nil {
|
||||
n(parseError(&r))
|
||||
}
|
||||
case 'Z':
|
||||
ci.cn.processReadyForQuery(&r)
|
||||
ci.done <- true
|
||||
return
|
||||
case 'E':
|
||||
err := parseError(&r)
|
||||
ci.setError(err)
|
||||
default:
|
||||
ci.setBad()
|
||||
ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t))
|
||||
ci.done <- true
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ci *copyin) setBad() {
|
||||
ci.Lock()
|
||||
ci.cn.bad = true
|
||||
ci.Unlock()
|
||||
}
|
||||
|
||||
func (ci *copyin) isBad() bool {
|
||||
ci.Lock()
|
||||
b := ci.cn.bad
|
||||
ci.Unlock()
|
||||
return b
|
||||
}
|
||||
|
||||
func (ci *copyin) isErrorSet() bool {
|
||||
ci.Lock()
|
||||
isSet := (ci.err != nil)
|
||||
ci.Unlock()
|
||||
return isSet
|
||||
}
|
||||
|
||||
// setError() sets ci.err if one has not been set already. Caller must not be
|
||||
// holding ci.Mutex.
|
||||
func (ci *copyin) setError(err error) {
|
||||
ci.Lock()
|
||||
if ci.err == nil {
|
||||
ci.err = err
|
||||
}
|
||||
ci.Unlock()
|
||||
}
|
||||
|
||||
func (ci *copyin) NumInput() int {
|
||||
return -1
|
||||
}
|
||||
|
||||
func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
// Exec inserts values into the COPY stream. The insert is asynchronous
|
||||
// and Exec can return errors from previous Exec calls to the same
|
||||
// COPY stmt.
|
||||
//
|
||||
// You need to call Exec(nil) to sync the COPY stream and to get any
|
||||
// errors from pending data, since Stmt.Close() doesn't return errors
|
||||
// to the user.
|
||||
func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {
|
||||
if ci.closed {
|
||||
return nil, errCopyInClosed
|
||||
}
|
||||
|
||||
if ci.isBad() {
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
defer ci.cn.errRecover(&err)
|
||||
|
||||
if ci.isErrorSet() {
|
||||
return nil, ci.err
|
||||
}
|
||||
|
||||
if len(v) == 0 {
|
||||
return driver.RowsAffected(0), ci.Close()
|
||||
}
|
||||
|
||||
numValues := len(v)
|
||||
for i, value := range v {
|
||||
ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value)
|
||||
if i < numValues-1 {
|
||||
ci.buffer = append(ci.buffer, '\t')
|
||||
}
|
||||
}
|
||||
|
||||
ci.buffer = append(ci.buffer, '\n')
|
||||
|
||||
if len(ci.buffer) > ciBufferFlushSize {
|
||||
ci.flush(ci.buffer)
|
||||
// reset buffer, keep bytes for message identifier and length
|
||||
ci.buffer = ci.buffer[:5]
|
||||
}
|
||||
|
||||
return driver.RowsAffected(0), nil
|
||||
}
|
||||
|
||||
func (ci *copyin) Close() (err error) {
|
||||
if ci.closed { // Don't do anything, we're already closed
|
||||
return nil
|
||||
}
|
||||
ci.closed = true
|
||||
|
||||
if ci.isBad() {
|
||||
return driver.ErrBadConn
|
||||
}
|
||||
defer ci.cn.errRecover(&err)
|
||||
|
||||
if len(ci.buffer) > 0 {
|
||||
ci.flush(ci.buffer)
|
||||
}
|
||||
// Avoid touching the scratch buffer as resploop could be using it.
|
||||
err = ci.cn.sendSimpleMessage('c')
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
<-ci.done
|
||||
ci.cn.inCopy = false
|
||||
|
||||
if ci.isErrorSet() {
|
||||
err = ci.err
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
0
vendor/github.com/lib/pq/copy_BASE_6648.go
generated
vendored
Normal file
0
vendor/github.com/lib/pq/copy_BASE_6722.go
generated
vendored
Normal file
284
vendor/github.com/lib/pq/copy_LOCAL_6648.go
generated
vendored
Normal file
@ -0,0 +1,284 @@
|
||||
package pq
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
errCopyInClosed = errors.New("pq: copyin statement has already been closed")
|
||||
errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY")
|
||||
errCopyToNotSupported = errors.New("pq: COPY TO is not supported")
|
||||
errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction")
|
||||
errCopyInProgress = errors.New("pq: COPY in progress")
|
||||
)
|
||||
|
||||
// CopyIn creates a COPY FROM statement which can be prepared with
|
||||
// Tx.Prepare(). The target table should be visible in search_path.
|
||||
func CopyIn(table string, columns ...string) string {
|
||||
stmt := "COPY " + QuoteIdentifier(table) + " ("
|
||||
for i, col := range columns {
|
||||
if i != 0 {
|
||||
stmt += ", "
|
||||
}
|
||||
stmt += QuoteIdentifier(col)
|
||||
}
|
||||
stmt += ") FROM STDIN"
|
||||
return stmt
|
||||
}
|
||||
|
||||
// CopyInSchema creates a COPY FROM statement which can be prepared with
|
||||
// Tx.Prepare().
|
||||
func CopyInSchema(schema, table string, columns ...string) string {
|
||||
stmt := "COPY " + QuoteIdentifier(schema) + "." + QuoteIdentifier(table) + " ("
|
||||
for i, col := range columns {
|
||||
if i != 0 {
|
||||
stmt += ", "
|
||||
}
|
||||
stmt += QuoteIdentifier(col)
|
||||
}
|
||||
stmt += ") FROM STDIN"
|
||||
return stmt
|
||||
}
|
||||
|
||||
type copyin struct {
|
||||
cn *conn
|
||||
buffer []byte
|
||||
rowData chan []byte
|
||||
done chan bool
|
||||
|
||||
closed bool
|
||||
|
||||
sync.Mutex // guards err
|
||||
err error
|
||||
}
|
||||
|
||||
const ciBufferSize = 64 * 1024
|
||||
|
||||
// flush buffer before the buffer is filled up and needs reallocation
|
||||
const ciBufferFlushSize = 63 * 1024
|
||||
|
||||
func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) {
|
||||
if !cn.isInTransaction() {
|
||||
return nil, errCopyNotSupportedOutsideTxn
|
||||
}
|
||||
|
||||
ci := ©in{
|
||||
cn: cn,
|
||||
buffer: make([]byte, 0, ciBufferSize),
|
||||
rowData: make(chan []byte),
|
||||
done: make(chan bool, 1),
|
||||
}
|
||||
// add CopyData identifier + 4 bytes for message length
|
||||
ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0)
|
||||
|
||||
b := cn.writeBuf('Q')
|
||||
b.string(q)
|
||||
cn.send(b)
|
||||
|
||||
awaitCopyInResponse:
|
||||
for {
|
||||
t, r := cn.recv1()
|
||||
switch t {
|
||||
case 'G':
|
||||
if r.byte() != 0 {
|
||||
err = errBinaryCopyNotSupported
|
||||
break awaitCopyInResponse
|
||||
}
|
||||
go ci.resploop()
|
||||
return ci, nil
|
||||
case 'H':
|
||||
err = errCopyToNotSupported
|
||||
break awaitCopyInResponse
|
||||
case 'E':
|
||||
err = parseError(r)
|
||||
case 'Z':
|
||||
if err == nil {
|
||||
ci.setBad()
|
||||
errorf("unexpected ReadyForQuery in response to COPY")
|
||||
}
|
||||
cn.processReadyForQuery(r)
|
||||
return nil, err
|
||||
default:
|
||||
ci.setBad()
|
||||
errorf("unknown response for copy query: %q", t)
|
||||
}
|
||||
}
|
||||
|
||||
// something went wrong, abort COPY before we return
|
||||
b = cn.writeBuf('f')
|
||||
b.string(err.Error())
|
||||
cn.send(b)
|
||||
|
||||
for {
|
||||
t, r := cn.recv1()
|
||||
switch t {
|
||||
case 'c', 'C', 'E':
|
||||
case 'Z':
|
||||
// correctly aborted, we're done
|
||||
cn.processReadyForQuery(r)
|
||||
return nil, err
|
||||
default:
|
||||
ci.setBad()
|
||||
errorf("unknown response for CopyFail: %q", t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ci *copyin) flush(buf []byte) {
|
||||
// set message length (without message identifier)
|
||||
binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1))
|
||||
|
||||
_, err := ci.cn.c.Write(buf)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (ci *copyin) resploop() {
|
||||
for {
|
||||
var r readBuf
|
||||
t, err := ci.cn.recvMessage(&r)
|
||||
if err != nil {
|
||||
ci.setBad()
|
||||
ci.setError(err)
|
||||
ci.done <- true
|
||||
return
|
||||
}
|
||||
switch t {
|
||||
case 'C':
|
||||
// complete
|
||||
case 'N':
|
||||
if n := ci.cn.noticeHandler; n != nil {
|
||||
n(parseError(&r))
|
||||
}
|
||||
case 'Z':
|
||||
ci.cn.processReadyForQuery(&r)
|
||||
ci.done <- true
|
||||
return
|
||||
case 'E':
|
||||
err := parseError(&r)
|
||||
ci.setError(err)
|
||||
default:
|
||||
ci.setBad()
|
||||
ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t))
|
||||
ci.done <- true
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ci *copyin) setBad() {
|
||||
ci.Lock()
|
||||
ci.cn.bad = true
|
||||
ci.Unlock()
|
||||
}
|
||||
|
||||
func (ci *copyin) isBad() bool {
|
||||
ci.Lock()
|
||||
b := ci.cn.bad
|
||||
ci.Unlock()
|
||||
return b
|
||||
}
|
||||
|
||||
func (ci *copyin) isErrorSet() bool {
|
||||
ci.Lock()
|
||||
isSet := (ci.err != nil)
|
||||
ci.Unlock()
|
||||
return isSet
|
||||
}
|
||||
|
||||
// setError() sets ci.err if one has not been set already. Caller must not be
|
||||
// holding ci.Mutex.
|
||||
func (ci *copyin) setError(err error) {
|
||||
ci.Lock()
|
||||
if ci.err == nil {
|
||||
ci.err = err
|
||||
}
|
||||
ci.Unlock()
|
||||
}
|
||||
|
||||
func (ci *copyin) NumInput() int {
|
||||
return -1
|
||||
}
|
||||
|
||||
func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
// Exec inserts values into the COPY stream. The insert is asynchronous
|
||||
// and Exec can return errors from previous Exec calls to the same
|
||||
// COPY stmt.
|
||||
//
|
||||
// You need to call Exec(nil) to sync the COPY stream and to get any
|
||||
// errors from pending data, since Stmt.Close() doesn't return errors
|
||||
// to the user.
|
||||
func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {
|
||||
if ci.closed {
|
||||
return nil, errCopyInClosed
|
||||
}
|
||||
|
||||
if ci.isBad() {
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
defer ci.cn.errRecover(&err)
|
||||
|
||||
if ci.isErrorSet() {
|
||||
return nil, ci.err
|
||||
}
|
||||
|
||||
if len(v) == 0 {
|
||||
return driver.RowsAffected(0), ci.Close()
|
||||
}
|
||||
|
||||
numValues := len(v)
|
||||
for i, value := range v {
|
||||
ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value)
|
||||
if i < numValues-1 {
|
||||
ci.buffer = append(ci.buffer, '\t')
|
||||
}
|
||||
}
|
||||
|
||||
ci.buffer = append(ci.buffer, '\n')
|
||||
|
||||
if len(ci.buffer) > ciBufferFlushSize {
|
||||
ci.flush(ci.buffer)
|
||||
// reset buffer, keep bytes for message identifier and length
|
||||
ci.buffer = ci.buffer[:5]
|
||||
}
|
||||
|
||||
return driver.RowsAffected(0), nil
|
||||
}
|
||||
|
||||
func (ci *copyin) Close() (err error) {
|
||||
if ci.closed { // Don't do anything, we're already closed
|
||||
return nil
|
||||
}
|
||||
ci.closed = true
|
||||
|
||||
if ci.isBad() {
|
||||
return driver.ErrBadConn
|
||||
}
|
||||
defer ci.cn.errRecover(&err)
|
||||
|
||||
if len(ci.buffer) > 0 {
|
||||
ci.flush(ci.buffer)
|
||||
}
|
||||
// Avoid touching the scratch buffer as resploop could be using it.
|
||||
err = ci.cn.sendSimpleMessage('c')
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
<-ci.done
|
||||
ci.cn.inCopy = false
|
||||
|
||||
if ci.isErrorSet() {
|
||||
err = ci.err
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
284
vendor/github.com/lib/pq/copy_LOCAL_6722.go
generated
vendored
Normal file
@ -0,0 +1,284 @@
|
||||
package pq
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
errCopyInClosed = errors.New("pq: copyin statement has already been closed")
|
||||
errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY")
|
||||
errCopyToNotSupported = errors.New("pq: COPY TO is not supported")
|
||||
errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction")
|
||||
errCopyInProgress = errors.New("pq: COPY in progress")
|
||||
)
|
||||
|
||||
// CopyIn creates a COPY FROM statement which can be prepared with
|
||||
// Tx.Prepare(). The target table should be visible in search_path.
|
||||
func CopyIn(table string, columns ...string) string {
|
||||
stmt := "COPY " + QuoteIdentifier(table) + " ("
|
||||
for i, col := range columns {
|
||||
if i != 0 {
|
||||
stmt += ", "
|
||||
}
|
||||
stmt += QuoteIdentifier(col)
|
||||
}
|
||||
stmt += ") FROM STDIN"
|
||||
return stmt
|
||||
}
|
||||
|
||||
// CopyInSchema creates a COPY FROM statement which can be prepared with
|
||||
// Tx.Prepare().
|
||||
func CopyInSchema(schema, table string, columns ...string) string {
|
||||
stmt := "COPY " + QuoteIdentifier(schema) + "." + QuoteIdentifier(table) + " ("
|
||||
for i, col := range columns {
|
||||
if i != 0 {
|
||||
stmt += ", "
|
||||
}
|
||||
stmt += QuoteIdentifier(col)
|
||||
}
|
||||
stmt += ") FROM STDIN"
|
||||
return stmt
|
||||
}
|
||||
|
||||
type copyin struct {
|
||||
cn *conn
|
||||
buffer []byte
|
||||
rowData chan []byte
|
||||
done chan bool
|
||||
|
||||
closed bool
|
||||
|
||||
sync.Mutex // guards err
|
||||
err error
|
||||
}
|
||||
|
||||
const ciBufferSize = 64 * 1024
|
||||
|
||||
// flush buffer before the buffer is filled up and needs reallocation
|
||||
const ciBufferFlushSize = 63 * 1024
|
||||
|
||||
func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) {
|
||||
if !cn.isInTransaction() {
|
||||
return nil, errCopyNotSupportedOutsideTxn
|
||||
}
|
||||
|
||||
ci := ©in{
|
||||
cn: cn,
|
||||
buffer: make([]byte, 0, ciBufferSize),
|
||||
rowData: make(chan []byte),
|
||||
done: make(chan bool, 1),
|
||||
}
|
||||
// add CopyData identifier + 4 bytes for message length
|
||||
ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0)
|
||||
|
||||
b := cn.writeBuf('Q')
|
||||
b.string(q)
|
||||
cn.send(b)
|
||||
|
||||
awaitCopyInResponse:
|
||||
for {
|
||||
t, r := cn.recv1()
|
||||
switch t {
|
||||
case 'G':
|
||||
if r.byte() != 0 {
|
||||
err = errBinaryCopyNotSupported
|
||||
break awaitCopyInResponse
|
||||
}
|
||||
go ci.resploop()
|
||||
return ci, nil
|
||||
case 'H':
|
||||
err = errCopyToNotSupported
|
||||
break awaitCopyInResponse
|
||||
case 'E':
|
||||
err = parseError(r)
|
||||
case 'Z':
|
||||
if err == nil {
|
||||
ci.setBad()
|
||||
errorf("unexpected ReadyForQuery in response to COPY")
|
||||
}
|
||||
cn.processReadyForQuery(r)
|
||||
return nil, err
|
||||
default:
|
||||
ci.setBad()
|
||||
errorf("unknown response for copy query: %q", t)
|
||||
}
|
||||
}
|
||||
|
||||
// something went wrong, abort COPY before we return
|
||||
b = cn.writeBuf('f')
|
||||
b.string(err.Error())
|
||||
cn.send(b)
|
||||
|
||||
for {
|
||||
t, r := cn.recv1()
|
||||
switch t {
|
||||
case 'c', 'C', 'E':
|
||||
case 'Z':
|
||||
// correctly aborted, we're done
|
||||
cn.processReadyForQuery(r)
|
||||
return nil, err
|
||||
default:
|
||||
ci.setBad()
|
||||
errorf("unknown response for CopyFail: %q", t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ci *copyin) flush(buf []byte) {
|
||||
// set message length (without message identifier)
|
||||
binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1))
|
||||
|
||||
_, err := ci.cn.c.Write(buf)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (ci *copyin) resploop() {
|
||||
for {
|
||||
var r readBuf
|
||||
t, err := ci.cn.recvMessage(&r)
|
||||
if err != nil {
|
||||
ci.setBad()
|
||||
ci.setError(err)
|
||||
ci.done <- true
|
||||
return
|
||||
}
|
||||
switch t {
|
||||
case 'C':
|
||||
// complete
|
||||
case 'N':
|
||||
if n := ci.cn.noticeHandler; n != nil {
|
||||
n(parseError(&r))
|
||||
}
|
||||
case 'Z':
|
||||
ci.cn.processReadyForQuery(&r)
|
||||
ci.done <- true
|
||||
return
|
||||
case 'E':
|
||||
err := parseError(&r)
|
||||
ci.setError(err)
|
||||
default:
|
||||
ci.setBad()
|
||||
ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t))
|
||||
ci.done <- true
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ci *copyin) setBad() {
|
||||
ci.Lock()
|
||||
ci.cn.bad = true
|
||||
ci.Unlock()
|
||||
}
|
||||
|
||||
func (ci *copyin) isBad() bool {
|
||||
ci.Lock()
|
||||
b := ci.cn.bad
|
||||
ci.Unlock()
|
||||
return b
|
||||
}
|
||||
|
||||
func (ci *copyin) isErrorSet() bool {
|
||||
ci.Lock()
|
||||
isSet := (ci.err != nil)
|
||||
ci.Unlock()
|
||||
return isSet
|
||||
}
|
||||
|
||||
// setError() sets ci.err if one has not been set already. Caller must not be
|
||||
// holding ci.Mutex.
|
||||
func (ci *copyin) setError(err error) {
|
||||
ci.Lock()
|
||||
if ci.err == nil {
|
||||
ci.err = err
|
||||
}
|
||||
ci.Unlock()
|
||||
}
|
||||
|
||||
func (ci *copyin) NumInput() int {
|
||||
return -1
|
||||
}
|
||||
|
||||
func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
// Exec inserts values into the COPY stream. The insert is asynchronous
|
||||
// and Exec can return errors from previous Exec calls to the same
|
||||
// COPY stmt.
|
||||
//
|
||||
// You need to call Exec(nil) to sync the COPY stream and to get any
|
||||
// errors from pending data, since Stmt.Close() doesn't return errors
|
||||
// to the user.
|
||||
func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {
|
||||
if ci.closed {
|
||||
return nil, errCopyInClosed
|
||||
}
|
||||
|
||||
if ci.isBad() {
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
defer ci.cn.errRecover(&err)
|
||||
|
||||
if ci.isErrorSet() {
|
||||
return nil, ci.err
|
||||
}
|
||||
|
||||
if len(v) == 0 {
|
||||
return driver.RowsAffected(0), ci.Close()
|
||||
}
|
||||
|
||||
numValues := len(v)
|
||||
for i, value := range v {
|
||||
ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value)
|
||||
if i < numValues-1 {
|
||||
ci.buffer = append(ci.buffer, '\t')
|
||||
}
|
||||
}
|
||||
|
||||
ci.buffer = append(ci.buffer, '\n')
|
||||
|
||||
if len(ci.buffer) > ciBufferFlushSize {
|
||||
ci.flush(ci.buffer)
|
||||
// reset buffer, keep bytes for message identifier and length
|
||||
ci.buffer = ci.buffer[:5]
|
||||
}
|
||||
|
||||
return driver.RowsAffected(0), nil
|
||||
}
|
||||
|
||||
func (ci *copyin) Close() (err error) {
|
||||
if ci.closed { // Don't do anything, we're already closed
|
||||
return nil
|
||||
}
|
||||
ci.closed = true
|
||||
|
||||
if ci.isBad() {
|
||||
return driver.ErrBadConn
|
||||
}
|
||||
defer ci.cn.errRecover(&err)
|
||||
|
||||
if len(ci.buffer) > 0 {
|
||||
ci.flush(ci.buffer)
|
||||
}
|
||||
// Avoid touching the scratch buffer as resploop could be using it.
|
||||
err = ci.cn.sendSimpleMessage('c')
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
<-ci.done
|
||||
ci.cn.inCopy = false
|
||||
|
||||
if ci.isErrorSet() {
|
||||
err = ci.err
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
282
vendor/github.com/lib/pq/copy_REMOTE_6648.go
generated
vendored
Normal file
@ -0,0 +1,282 @@
|
||||
package pq
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
errCopyInClosed = errors.New("pq: copyin statement has already been closed")
|
||||
errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY")
|
||||
errCopyToNotSupported = errors.New("pq: COPY TO is not supported")
|
||||
errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction")
|
||||
errCopyInProgress = errors.New("pq: COPY in progress")
|
||||
)
|
||||
|
||||
// CopyIn creates a COPY FROM statement which can be prepared with
|
||||
// Tx.Prepare(). The target table should be visible in search_path.
|
||||
func CopyIn(table string, columns ...string) string {
|
||||
stmt := "COPY " + QuoteIdentifier(table) + " ("
|
||||
for i, col := range columns {
|
||||
if i != 0 {
|
||||
stmt += ", "
|
||||
}
|
||||
stmt += QuoteIdentifier(col)
|
||||
}
|
||||
stmt += ") FROM STDIN"
|
||||
return stmt
|
||||
}
|
||||
|
||||
// CopyInSchema creates a COPY FROM statement which can be prepared with
|
||||
// Tx.Prepare().
|
||||
func CopyInSchema(schema, table string, columns ...string) string {
|
||||
stmt := "COPY " + QuoteIdentifier(schema) + "." + QuoteIdentifier(table) + " ("
|
||||
for i, col := range columns {
|
||||
if i != 0 {
|
||||
stmt += ", "
|
||||
}
|
||||
stmt += QuoteIdentifier(col)
|
||||
}
|
||||
stmt += ") FROM STDIN"
|
||||
return stmt
|
||||
}
|
||||
|
||||
type copyin struct {
|
||||
cn *conn
|
||||
buffer []byte
|
||||
rowData chan []byte
|
||||
done chan bool
|
||||
|
||||
closed bool
|
||||
|
||||
sync.Mutex // guards err
|
||||
err error
|
||||
}
|
||||
|
||||
const ciBufferSize = 64 * 1024
|
||||
|
||||
// flush buffer before the buffer is filled up and needs reallocation
|
||||
const ciBufferFlushSize = 63 * 1024
|
||||
|
||||
func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) {
|
||||
if !cn.isInTransaction() {
|
||||
return nil, errCopyNotSupportedOutsideTxn
|
||||
}
|
||||
|
||||
ci := ©in{
|
||||
cn: cn,
|
||||
buffer: make([]byte, 0, ciBufferSize),
|
||||
rowData: make(chan []byte),
|
||||
done: make(chan bool, 1),
|
||||
}
|
||||
// add CopyData identifier + 4 bytes for message length
|
||||
ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0)
|
||||
|
||||
b := cn.writeBuf('Q')
|
||||
b.string(q)
|
||||
cn.send(b)
|
||||
|
||||
awaitCopyInResponse:
|
||||
for {
|
||||
t, r := cn.recv1()
|
||||
switch t {
|
||||
case 'G':
|
||||
if r.byte() != 0 {
|
||||
err = errBinaryCopyNotSupported
|
||||
break awaitCopyInResponse
|
||||
}
|
||||
go ci.resploop()
|
||||
return ci, nil
|
||||
case 'H':
|
||||
err = errCopyToNotSupported
|
||||
break awaitCopyInResponse
|
||||
case 'E':
|
||||
err = parseError(r)
|
||||
case 'Z':
|
||||
if err == nil {
|
||||
ci.setBad()
|
||||
errorf("unexpected ReadyForQuery in response to COPY")
|
||||
}
|
||||
cn.processReadyForQuery(r)
|
||||
return nil, err
|
||||
default:
|
||||
ci.setBad()
|
||||
errorf("unknown response for copy query: %q", t)
|
||||
}
|
||||
}
|
||||
|
||||
// something went wrong, abort COPY before we return
|
||||
b = cn.writeBuf('f')
|
||||
b.string(err.Error())
|
||||
cn.send(b)
|
||||
|
||||
for {
|
||||
t, r := cn.recv1()
|
||||
switch t {
|
||||
case 'c', 'C', 'E':
|
||||
case 'Z':
|
||||
// correctly aborted, we're done
|
||||
cn.processReadyForQuery(r)
|
||||
return nil, err
|
||||
default:
|
||||
ci.setBad()
|
||||
errorf("unknown response for CopyFail: %q", t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ci *copyin) flush(buf []byte) {
|
||||
// set message length (without message identifier)
|
||||
binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1))
|
||||
|
||||
_, err := ci.cn.c.Write(buf)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (ci *copyin) resploop() {
|
||||
for {
|
||||
var r readBuf
|
||||
t, err := ci.cn.recvMessage(&r)
|
||||
if err != nil {
|
||||
ci.setBad()
|
||||
ci.setError(err)
|
||||
ci.done <- true
|
||||
return
|
||||
}
|
||||
switch t {
|
||||
case 'C':
|
||||
// complete
|
||||
case 'N':
|
||||
// NoticeResponse
|
||||
case 'Z':
|
||||
ci.cn.processReadyForQuery(&r)
|
||||
ci.done <- true
|
||||
return
|
||||
case 'E':
|
||||
err := parseError(&r)
|
||||
ci.setError(err)
|
||||
default:
|
||||
ci.setBad()
|
||||
ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t))
|
||||
ci.done <- true
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ci *copyin) setBad() {
|
||||
ci.Lock()
|
||||
ci.cn.bad = true
|
||||
ci.Unlock()
|
||||
}
|
||||
|
||||
func (ci *copyin) isBad() bool {
|
||||
ci.Lock()
|
||||
b := ci.cn.bad
|
||||
ci.Unlock()
|
||||
return b
|
||||
}
|
||||
|
||||
func (ci *copyin) isErrorSet() bool {
|
||||
ci.Lock()
|
||||
isSet := (ci.err != nil)
|
||||
ci.Unlock()
|
||||
return isSet
|
||||
}
|
||||
|
||||
// setError() sets ci.err if one has not been set already. Caller must not be
|
||||
// holding ci.Mutex.
|
||||
func (ci *copyin) setError(err error) {
|
||||
ci.Lock()
|
||||
if ci.err == nil {
|
||||
ci.err = err
|
||||
}
|
||||
ci.Unlock()
|
||||
}
|
||||
|
||||
func (ci *copyin) NumInput() int {
|
||||
return -1
|
||||
}
|
||||
|
||||
func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
// Exec inserts values into the COPY stream. The insert is asynchronous
|
||||
// and Exec can return errors from previous Exec calls to the same
|
||||
// COPY stmt.
|
||||
//
|
||||
// You need to call Exec(nil) to sync the COPY stream and to get any
|
||||
// errors from pending data, since Stmt.Close() doesn't return errors
|
||||
// to the user.
|
||||
func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {
|
||||
if ci.closed {
|
||||
return nil, errCopyInClosed
|
||||
}
|
||||
|
||||
if ci.isBad() {
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
defer ci.cn.errRecover(&err)
|
||||
|
||||
if ci.isErrorSet() {
|
||||
return nil, ci.err
|
||||
}
|
||||
|
||||
if len(v) == 0 {
|
||||
return nil, ci.Close()
|
||||
}
|
||||
|
||||
numValues := len(v)
|
||||
for i, value := range v {
|
||||
ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value)
|
||||
if i < numValues-1 {
|
||||
ci.buffer = append(ci.buffer, '\t')
|
||||
}
|
||||
}
|
||||
|
||||
ci.buffer = append(ci.buffer, '\n')
|
||||
|
||||
if len(ci.buffer) > ciBufferFlushSize {
|
||||
ci.flush(ci.buffer)
|
||||
// reset buffer, keep bytes for message identifier and length
|
||||
ci.buffer = ci.buffer[:5]
|
||||
}
|
||||
|
||||
return driver.RowsAffected(0), nil
|
||||
}
|
||||
|
||||
func (ci *copyin) Close() (err error) {
|
||||
if ci.closed { // Don't do anything, we're already closed
|
||||
return nil
|
||||
}
|
||||
ci.closed = true
|
||||
|
||||
if ci.isBad() {
|
||||
return driver.ErrBadConn
|
||||
}
|
||||
defer ci.cn.errRecover(&err)
|
||||
|
||||
if len(ci.buffer) > 0 {
|
||||
ci.flush(ci.buffer)
|
||||
}
|
||||
// Avoid touching the scratch buffer as resploop could be using it.
|
||||
err = ci.cn.sendSimpleMessage('c')
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
<-ci.done
|
||||
ci.cn.inCopy = false
|
||||
|
||||
if ci.isErrorSet() {
|
||||
err = ci.err
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
282
vendor/github.com/lib/pq/copy_REMOTE_6722.go
generated
vendored
Normal file
@ -0,0 +1,282 @@
|
||||
package pq
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
errCopyInClosed = errors.New("pq: copyin statement has already been closed")
|
||||
errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY")
|
||||
errCopyToNotSupported = errors.New("pq: COPY TO is not supported")
|
||||
errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction")
|
||||
errCopyInProgress = errors.New("pq: COPY in progress")
|
||||
)
|
||||
|
||||
// CopyIn creates a COPY FROM statement which can be prepared with
|
||||
// Tx.Prepare(). The target table should be visible in search_path.
|
||||
func CopyIn(table string, columns ...string) string {
|
||||
stmt := "COPY " + QuoteIdentifier(table) + " ("
|
||||
for i, col := range columns {
|
||||
if i != 0 {
|
||||
stmt += ", "
|
||||
}
|
||||
stmt += QuoteIdentifier(col)
|
||||
}
|
||||
stmt += ") FROM STDIN"
|
||||
return stmt
|
||||
}
|
||||
|
||||
// CopyInSchema creates a COPY FROM statement which can be prepared with
|
||||
// Tx.Prepare().
|
||||
func CopyInSchema(schema, table string, columns ...string) string {
|
||||
stmt := "COPY " + QuoteIdentifier(schema) + "." + QuoteIdentifier(table) + " ("
|
||||
for i, col := range columns {
|
||||
if i != 0 {
|
||||
stmt += ", "
|
||||
}
|
||||
stmt += QuoteIdentifier(col)
|
||||
}
|
||||
stmt += ") FROM STDIN"
|
||||
return stmt
|
||||
}
|
||||
|
||||
type copyin struct {
|
||||
cn *conn
|
||||
buffer []byte
|
||||
rowData chan []byte
|
||||
done chan bool
|
||||
|
||||
closed bool
|
||||
|
||||
sync.Mutex // guards err
|
||||
err error
|
||||
}
|
||||
|
||||
const ciBufferSize = 64 * 1024
|
||||
|
||||
// flush buffer before the buffer is filled up and needs reallocation
|
||||
const ciBufferFlushSize = 63 * 1024
|
||||
|
||||
func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) {
|
||||
if !cn.isInTransaction() {
|
||||
return nil, errCopyNotSupportedOutsideTxn
|
||||
}
|
||||
|
||||
ci := ©in{
|
||||
cn: cn,
|
||||
buffer: make([]byte, 0, ciBufferSize),
|
||||
rowData: make(chan []byte),
|
||||
done: make(chan bool, 1),
|
||||
}
|
||||
// add CopyData identifier + 4 bytes for message length
|
||||
ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0)
|
||||
|
||||
b := cn.writeBuf('Q')
|
||||
b.string(q)
|
||||
cn.send(b)
|
||||
|
||||
awaitCopyInResponse:
|
||||
for {
|
||||
t, r := cn.recv1()
|
||||
switch t {
|
||||
case 'G':
|
||||
if r.byte() != 0 {
|
||||
err = errBinaryCopyNotSupported
|
||||
break awaitCopyInResponse
|
||||
}
|
||||
go ci.resploop()
|
||||
return ci, nil
|
||||
case 'H':
|
||||
err = errCopyToNotSupported
|
||||
break awaitCopyInResponse
|
||||
case 'E':
|
||||
err = parseError(r)
|
||||
case 'Z':
|
||||
if err == nil {
|
||||
ci.setBad()
|
||||
errorf("unexpected ReadyForQuery in response to COPY")
|
||||
}
|
||||
cn.processReadyForQuery(r)
|
||||
return nil, err
|
||||
default:
|
||||
ci.setBad()
|
||||
errorf("unknown response for copy query: %q", t)
|
||||
}
|
||||
}
|
||||
|
||||
// something went wrong, abort COPY before we return
|
||||
b = cn.writeBuf('f')
|
||||
b.string(err.Error())
|
||||
cn.send(b)
|
||||
|
||||
for {
|
||||
t, r := cn.recv1()
|
||||
switch t {
|
||||
case 'c', 'C', 'E':
|
||||
case 'Z':
|
||||
// correctly aborted, we're done
|
||||
cn.processReadyForQuery(r)
|
||||
return nil, err
|
||||
default:
|
||||
ci.setBad()
|
||||
errorf("unknown response for CopyFail: %q", t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ci *copyin) flush(buf []byte) {
|
||||
// set message length (without message identifier)
|
||||
binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1))
|
||||
|
||||
_, err := ci.cn.c.Write(buf)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (ci *copyin) resploop() {
|
||||
for {
|
||||
var r readBuf
|
||||
t, err := ci.cn.recvMessage(&r)
|
||||
if err != nil {
|
||||
ci.setBad()
|
||||
ci.setError(err)
|
||||
ci.done <- true
|
||||
return
|
||||
}
|
||||
switch t {
|
||||
case 'C':
|
||||
// complete
|
||||
case 'N':
|
||||
// NoticeResponse
|
||||
case 'Z':
|
||||
ci.cn.processReadyForQuery(&r)
|
||||
ci.done <- true
|
||||
return
|
||||
case 'E':
|
||||
err := parseError(&r)
|
||||
ci.setError(err)
|
||||
default:
|
||||
ci.setBad()
|
||||
ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t))
|
||||
ci.done <- true
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ci *copyin) setBad() {
|
||||
ci.Lock()
|
||||
ci.cn.bad = true
|
||||
ci.Unlock()
|
||||
}
|
||||
|
||||
func (ci *copyin) isBad() bool {
|
||||
ci.Lock()
|
||||
b := ci.cn.bad
|
||||
ci.Unlock()
|
||||
return b
|
||||
}
|
||||
|
||||
func (ci *copyin) isErrorSet() bool {
|
||||
ci.Lock()
|
||||
isSet := (ci.err != nil)
|
||||
ci.Unlock()
|
||||
return isSet
|
||||
}
|
||||
|
||||
// setError() sets ci.err if one has not been set already. Caller must not be
|
||||
// holding ci.Mutex.
|
||||
func (ci *copyin) setError(err error) {
|
||||
ci.Lock()
|
||||
if ci.err == nil {
|
||||
ci.err = err
|
||||
}
|
||||
ci.Unlock()
|
||||
}
|
||||
|
||||
func (ci *copyin) NumInput() int {
|
||||
return -1
|
||||
}
|
||||
|
||||
func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
// Exec inserts values into the COPY stream. The insert is asynchronous
|
||||
// and Exec can return errors from previous Exec calls to the same
|
||||
// COPY stmt.
|
||||
//
|
||||
// You need to call Exec(nil) to sync the COPY stream and to get any
|
||||
// errors from pending data, since Stmt.Close() doesn't return errors
|
||||
// to the user.
|
||||
func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {
|
||||
if ci.closed {
|
||||
return nil, errCopyInClosed
|
||||
}
|
||||
|
||||
if ci.isBad() {
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
defer ci.cn.errRecover(&err)
|
||||
|
||||
if ci.isErrorSet() {
|
||||
return nil, ci.err
|
||||
}
|
||||
|
||||
if len(v) == 0 {
|
||||
return nil, ci.Close()
|
||||
}
|
||||
|
||||
numValues := len(v)
|
||||
for i, value := range v {
|
||||
ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value)
|
||||
if i < numValues-1 {
|
||||
ci.buffer = append(ci.buffer, '\t')
|
||||
}
|
||||
}
|
||||
|
||||
ci.buffer = append(ci.buffer, '\n')
|
||||
|
||||
if len(ci.buffer) > ciBufferFlushSize {
|
||||
ci.flush(ci.buffer)
|
||||
// reset buffer, keep bytes for message identifier and length
|
||||
ci.buffer = ci.buffer[:5]
|
||||
}
|
||||
|
||||
return driver.RowsAffected(0), nil
|
||||
}
|
||||
|
||||
func (ci *copyin) Close() (err error) {
|
||||
if ci.closed { // Don't do anything, we're already closed
|
||||
return nil
|
||||
}
|
||||
ci.closed = true
|
||||
|
||||
if ci.isBad() {
|
||||
return driver.ErrBadConn
|
||||
}
|
||||
defer ci.cn.errRecover(&err)
|
||||
|
||||
if len(ci.buffer) > 0 {
|
||||
ci.flush(ci.buffer)
|
||||
}
|
||||
// Avoid touching the scratch buffer as resploop could be using it.
|
||||
err = ci.cn.sendSimpleMessage('c')
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
<-ci.done
|
||||
ci.cn.inCopy = false
|
||||
|
||||
if ci.isErrorSet() {
|
||||
err = ci.err
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
2
vendor/github.com/lib/pq/go.mod
generated
vendored
@ -1,6 +1,6 @@
module github.com/lib/pq

go 1.13
go 1.14

require (
	github.com/alexbrainman/sspi v0.0.0-20180613141037-e580b900e9f5 // indirect
7
vendor/modules.txt
vendored
@ -7,8 +7,6 @@ github.com/antchfx/htmlquery
# github.com/antchfx/xpath v1.0.0
## explicit
github.com/antchfx/xpath
# github.com/go-sql-driver/mysql v1.4.1
## explicit
# github.com/gobuffalo/here v0.6.0
github.com/gobuffalo/here
# github.com/gobuffalo/logger v1.0.3
@ -79,6 +77,11 @@ github.com/jcmturner/gokrb5/v8/types
# github.com/jcmturner/rpc/v2 v2.0.2
github.com/jcmturner/rpc/v2/mstypes
github.com/jcmturner/rpc/v2/ndr
# github.com/jinzhu/gorm v1.9.12
## explicit
github.com/jinzhu/gorm
# github.com/jinzhu/inflection v1.0.0
github.com/jinzhu/inflection
# github.com/jmoiron/sqlx v1.2.0
## explicit
github.com/jmoiron/sqlx