changes for xorm branch

Paul 2020-07-19 19:32:22 +02:00
parent f80a43faf1
commit c8cbd8b6b4
258 changed files with 42408 additions and 10325 deletions


@ -19,6 +19,9 @@ func main() {
cfg.Version = version
err := database.Initialize(&cfg)
if err != nil {
log.Fatalln(err)
}
defer cfg.Db.Close()
err = qrz.InitCronConfig(cfg)

go.mod | 5

@ -6,13 +6,14 @@ require (
github.com/antchfx/htmlquery v1.0.0
github.com/antchfx/xpath v1.0.0 // indirect
github.com/gobuffalo/packr/v2 v2.8.0
github.com/jinzhu/gorm v1.9.12
github.com/jmoiron/sqlx v1.2.0
github.com/labstack/echo/v4 v4.1.16
github.com/lib/pq v1.6.0
github.com/markbates/pkger v0.14.0
github.com/mattn/go-sqlite3 v2.0.1+incompatible // indirect
github.com/robfig/cron v1.2.0
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 // indirect
google.golang.org/appengine v1.6.2 // indirect
gopkg.in/ini.v1 v1.46.0
xorm.io/builder v0.3.7
xorm.io/xorm v1.0.2
)

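The require block above swaps gorm and sqlx for xorm and its query builder. As a quick orientation (not code from this commit; the DSN is a placeholder), opening an engine with the new dependency looks like this:

package main

import (
    "log"

    _ "github.com/lib/pq" // registers the postgres driver with database/sql
    "xorm.io/xorm"
)

func main() {
    // Placeholder DSN; the real one is assembled from the ini config below.
    engine, err := xorm.NewEngine("postgres", "postgres://user:pass@localhost/qrz")
    if err != nil {
        log.Fatalln(err)
    }
    defer engine.Close()

    // Ping verifies connectivity, roughly what sqlx.Connect did implicitly.
    if err := engine.Ping(); err != nil {
        log.Fatalln(err)
    }
}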
go.sum | 94

@ -1,6 +1,12 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:lSA0F4e9A2NcQSqGqTOXqu2aRi/XEQxDCBwM8yJtE6s=
gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:EXuID2Zs0pAQhH8yz+DNjUbjppKQzKFAn28TMYPB6IU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alexbrainman/sspi v0.0.0-20180613141037-e580b900e9f5 h1:P5U+E4x5OkVEKQDklVPmzs71WM56RTTRqV4OrDC//Y4=
@ -9,6 +15,7 @@ github.com/antchfx/htmlquery v1.0.0 h1:O5IXz8fZF3B3MW+B33MZWbTHBlYmcfw0BAxgErHua
github.com/antchfx/htmlquery v1.0.0/go.mod h1:MS9yksVSQXls00iXkiMqXr0J+umL/AmxXKuP28SUJM8=
github.com/antchfx/xpath v1.0.0 h1:Q5gFgh2O40VTSwMOVbFE7nFNRBu3tS21Tn0KAWeEjtk=
github.com/antchfx/xpath v1.0.0/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@ -24,18 +31,18 @@ github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd h1:83Wprp6ROGeiHFAP8WJdI2RoxALQYgdllERc3N5N2DM=
github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@ -48,18 +55,27 @@ github.com/gobuffalo/packd v1.0.0/go.mod h1:6VTc4htmJRFB7u1m/4LeMTWjFoYrUiBkU9Fd
github.com/gobuffalo/packr/v2 v2.8.0 h1:IULGd15bQL59ijXLxEvA5wlMxsmx/ZkQv9T282zNVIY=
github.com/gobuffalo/packr/v2 v2.8.0/go.mod h1:PDk2k3vGevNE3SwVyVRgQCCXETC9SaONCNSXT1Q8M1g=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.0 h1:S7P+1Hm5V/AT9cjEcUD5uDaQSX0OE577aCXgoaKpYbQ=
@ -70,7 +86,10 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
@ -85,15 +104,8 @@ github.com/jcmturner/gokrb5/v8 v8.2.0 h1:lzPl/30ZLkTveYsYZPKMcgXc8MbnE6RsTd4F9Kg
github.com/jcmturner/gokrb5/v8 v8.2.0/go.mod h1:T1hnNppQsBtxW0tCHMHTkAt8n/sABdzZgZdoFrZaZNM=
github.com/jcmturner/rpc/v2 v2.0.2 h1:gMB4IwRXYsWw4Bc6o/az2HJgFUA1ffSh90i26ZJ6Xl0=
github.com/jcmturner/rpc/v2 v2.0.2/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/jinzhu/gorm v1.9.12 h1:Drgk1clyWT9t9ERbzHza6Mj/8FY/CqMyVzOiHviMo6Q=
github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.0.1 h1:HjfetcXq097iXP0uoPCdnM4Efp5/9MsM0/M+XOTeR3M=
github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
@ -116,7 +128,6 @@ github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.6.0 h1:I5DPxhYJChW9KYc66se+oKFFQX6VuQrKiprsX6ivRZc=
github.com/lib/pq v1.6.0/go.mod h1:4vXEAYvW1fRQ2/FhZ78H73A60MHw1geSm145z2mdY1g=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
@ -135,8 +146,7 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-sqlite3 v1.9.0 h1:pDRiWfl+++eC2FEFRy6jXmQlvp4Yh3z1MJKg4UeYM/4=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v2.0.1+incompatible h1:xQ15muvnzGBHpIpdrNi1DA5x0+TcBZzsIDwmw9uTHzw=
github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@ -144,19 +154,31 @@ github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
@ -189,6 +211,8 @@ github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJy
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
@ -198,7 +222,9 @@ github.com/valyala/fasttemplate v1.1.0 h1:RZqt0yGBsps8NGvLSGW804QQqCUYYLsaOjTVHy
github.com/valyala/fasttemplate v1.1.0/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
@ -208,19 +234,25 @@ golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d h1:1ZiEyfaQIg3Qh0EoqpwAakHVhecoE5wlSg5GjnafJGw=
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4 h1:QmwruyY+bKbDDL0BaglrbZABEali68eoMFhTZpCjYVA=
golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
@ -230,16 +262,20 @@ golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -250,13 +286,18 @@ golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@ -265,11 +306,16 @@ golang.org/x/tools v0.0.0-20200308013534-11ec41452d41/go.mod h1:o4KQGtdN14AW+yjs
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.2 h1:j8RI1yW0SkI+paT6uGwMlrMI/6zwYA6/CFil8rxOzGI=
google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
@ -278,6 +324,8 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.46.0 h1:VeDZbLYGaupuvIrsYCEOe/L/2Pcs5n7hdO1ZTjporag=
gopkg.in/ini.v1 v1.46.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
@ -286,10 +334,18 @@ gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mN
gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
xorm.io/builder v0.3.7 h1:2pETdKRK+2QG4mLX4oODHEhn5Z8j1m8sXa7jfu+/SZI=
xorm.io/builder v0.3.7/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE=
xorm.io/xorm v1.0.2 h1:kZlCh9rqd1AzGwWitcrEEqHE1h1eaZE/ujU5/2tWEtg=
xorm.io/xorm v1.0.2/go.mod h1:o4vnEsQ5V2F1/WK6w4XTwmiWJeGj82tqjAnHe44wVHY=

main-packr.go | 8 (new file)

@ -0,0 +1,8 @@
// +build !skippackr
// Code generated by github.com/gobuffalo/packr/v2. DO NOT EDIT.
// You can use the "packr clean" command to clean up this,
// and any other packr generated files.
package main
import _ "git.paulbsd.com/paulbsd/qrz/packrd"

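main-packr.go is generated by packr to register the embedded assets at build time. For context, a sketch of how such a box is typically consumed; the box name and path here are illustrative, not taken from this repository:

package static

import "github.com/gobuffalo/packr/v2"

// Illustrative box; packr resolves the directory at generation time.
var box = packr.New("static", "./files")

// indexHTML returns the contents of an embedded file.
func indexHTML() (string, error) {
    return box.FindString("index.html")
}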

@ -2,11 +2,10 @@ package config
import (
"flag"
"fmt"
"git.paulbsd.com/paulbsd/qrz/utils"
"github.com/jmoiron/sqlx"
"gopkg.in/ini.v1"
"xorm.io/xorm"
)
// GetConfig fetches the configuration
@ -35,91 +34,26 @@ func (config *Config) GetConfig() error {
config.DbName = qrzsection.Key("db_name").MustString("database")
config.DbUsername = qrzsection.Key("db_username").MustString("username")
config.DbPassword = qrzsection.Key("db_password").MustString("password")
config.DbSchema = qrzsection.Key("db_schema").MustString("")
config.DbTable = qrzsection.Key("db_table").MustString("qrz")
config.Cron = qrzsection.Key("cron").MustString("@every 1h")
config.DbSchema = fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %[1]s (
id serial NOT NULL PRIMARY KEY,
qrz varchar(25) NOT NULL,
name varchar(25) DEFAULT NULL,
address varchar(50) DEFAULT NULL,
city varchar(50) DEFAULT NULL,
zipcode varchar(5) DEFAULT NULL,
dept varchar(50) DEFAULT NULL,
country varchar(25) DEFAULT NULL,
dmrid varchar(25) DEFAULT NULL,
fts tsvector NULL,
CONSTRAINT qrz_unique UNIQUE(qrz, name, city, dept, country)
);
CREATE INDEX IF NOT EXISTS fts_idx ON %[1]s (fts);
CREATE OR REPLACE FUNCTION updateFTS() RETURNS trigger AS $fts_trigger$
BEGIN
IF pg_trigger_depth() <> 1 THEN
RETURN NEW;
END IF;
UPDATE %[1]s SET fts = to_tsvector(concat_ws(' ', qrz, name, city, dept, country)) WHERE id = NEW.id;
RETURN NEW;
END;
$fts_trigger$ LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS fts_trigger ON %[1]s;
CREATE TRIGGER fts_trigger
AFTER INSERT ON %[1]s
FOR EACH ROW EXECUTE PROCEDURE updateFTS();
`,
config.DbTable)
config.DbStatements.Insert = fmt.Sprintf(`
INSERT INTO %[1]s (qrz, dmrid, name, city, dept, country)
VALUES ($1, $2, $3, $4, $5, $6);
`,
config.DbTable)
config.DbStatements.ExportCSV = fmt.Sprintf(`
SELECT qrz, name, city, dept, country
FROM %[1]s;
`,
config.DbTable)
config.DbStatements.Countries = fmt.Sprintf(`
SELECT country
FROM %[1]s
GROUP BY country;
`,
config.DbTable)
config.URLBase = `http://groupe-frs.hamstation.eu/index_qrz_liste_%s.php`
config.QrzGroups = []string{"01", "03", "09", "103", "104", "107", "119", "13", "14", "146", "147", "15", "156", "16", "161", "163", "18", "188", "2", "214", "233", "25", "26", "29", "30", "31", "32", "34", "43", "44", "49", "54", "64", "66", "76", "79", "84", "97", "98"}
//config.QrzGroups = map[string]string{"France": "14"}
config.QrzGroups = []string{"01", "03", "09", "103", "104", "107", "119", "13", "146", "147", "15", "156", "16", "161", "163", "18", "188", "2", "214", "233", "25", "26", "29", "30", "31", "32", "34", "43", "44", "49", "54", "64", "66", "76", "79", "84", "97", "98"}
return nil
}
// Config is the global config of qrz
type Config struct {
Db *sqlx.DB
DbHostname string
DbName string
DbUsername string
DbPassword string
DbSchema string
DbTable string
DbStatements struct {
Insert string
ExportCSV string
Countries string
}
URLBase string
QrzGroups []string
Cron string
Port int
NoFeed bool
Version string
Db *xorm.Engine
DbHostname string
DbName string
DbUsername string
DbPassword string
URLBase string
QrzGroups []string
Cron string
Port int
NoFeed bool
Version string
}

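The GetConfig changes above keep the gopkg.in/ini.v1 pattern of per-key defaults via MustString. A standalone sketch of that behavior (section and key names mirror the ones used here):

package main

import (
    "fmt"

    "gopkg.in/ini.v1"
)

func main() {
    cfg, err := ini.Load([]byte("[qrz]\ndb_hostname = db.example.com\n"))
    if err != nil {
        panic(err)
    }
    sec := cfg.Section("qrz")
    // MustString returns the stored value, or the default when the key is absent.
    fmt.Println(sec.Key("db_hostname").MustString("localhost")) // db.example.com
    fmt.Println(sec.Key("db_name").MustString("database"))      // database (default)
}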

@ -5,20 +5,22 @@ import (
"log"
"git.paulbsd.com/paulbsd/qrz/src/config"
"github.com/jmoiron/sqlx"
"git.paulbsd.com/paulbsd/qrz/src/qrz"
_ "github.com/lib/pq"
"xorm.io/xorm"
"xorm.io/xorm/names"
)
// Initialize creates the database connection and sets up the schema
func Initialize(config *config.Config) (err error) {
config.Db, err = sqlx.Connect("postgres", fmt.Sprintf("postgres://%s:%s@%s/%s", config.DbUsername, config.DbPassword, config.DbHostname, config.DbName))
config.Db, err = xorm.NewEngine("postgres", fmt.Sprintf("postgres://%s:%s@%s/%s", config.DbUsername, config.DbPassword, config.DbHostname, config.DbName))
if err != nil {
log.Fatalln(err)
}
_, err = config.Db.Exec(config.DbSchema)
if err != nil {
log.Fatalln(err)
}
config.Db.SetMapper(names.GonicMapper{})
    err = config.Db.CreateTables(qrz.Qrz{})
    if err != nil {
        log.Fatalln(err)
    }
config.Db.ShowSQL(true)
return
}

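Initialize now delegates schema handling to xorm: SetMapper(names.GonicMapper{}) controls how Go identifiers become column and table names, and CreateTables issues the DDL derived from the struct tags. A small sketch of the naming side (expected outputs in comments, not captured from a run):

package main

import (
    "fmt"

    "xorm.io/xorm/names"
)

func main() {
    m := names.GonicMapper{}
    // Obj2Table maps a Go identifier to a snake_cased SQL name.
    fmt.Println(m.Obj2Table("DbHostname")) // db_hostname (expected)
    fmt.Println(m.Obj2Table("Country"))    // country (expected)
}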

@ -1,18 +1,15 @@
package qrz
import (
"database/sql"
"fmt"
"io/ioutil"
"log"
"net/http"
"regexp"
"strings"
"time"
"git.paulbsd.com/paulbsd/qrz/src/config"
"github.com/antchfx/htmlquery"
"github.com/jinzhu/gorm"
"github.com/robfig/cron"
)
@ -86,8 +83,8 @@ func getBody(url string) (string, error) {
}
// getFrsEntries gets FRS entries from the HTML body
func getFrsEntries(config config.Config, body string) (frsentries map[string]FrsEntry, err error) {
frsentries = make(map[string]FrsEntry)
func getFrsEntries(config config.Config, body string) (frsentries map[string]Qrz, err error) {
frsentries = make(map[string]Qrz)
re := regexp.MustCompile(`^ [0-9]{1,4}\s[A-Z]{1,4}\s[0-9]{1,4}`)
htmlpage, err := htmlquery.Parse(strings.NewReader(body))
@ -99,21 +96,21 @@ func getFrsEntries(config config.Config, body string) (frsentries map[string]Frs
td := htmlquery.Find(n, "//td")
if len(td) > 2 {
if re.MatchString(htmlquery.InnerText(td[1])) {
frs := FrsEntry{
frs := Qrz{
QRZ: strings.TrimLeft(htmlquery.InnerText(td[1]), " "),
DMRID: sql.NullString{String: strings.TrimLeft(htmlquery.InnerText(td[0]), " "), Valid: true},
Name: sql.NullString{String: strings.TrimLeft(htmlquery.InnerText(td[2]), " "), Valid: true},
City: sql.NullString{String: strings.TrimLeft(htmlquery.InnerText(td[3]), " "), Valid: true},
Dept: sql.NullString{String: strings.TrimLeft(htmlquery.InnerText(td[4]), " "), Valid: true},
Country: sql.NullString{String: strings.TrimLeft(htmlquery.InnerText(td[5]), " "), Valid: true}}
DMRID: strings.TrimLeft(htmlquery.InnerText(td[0]), " "),
Name: strings.TrimLeft(htmlquery.InnerText(td[2]), " "),
City: strings.TrimLeft(htmlquery.InnerText(td[3]), " "),
Dept: strings.TrimLeft(htmlquery.InnerText(td[4]), " "),
Country: strings.TrimLeft(htmlquery.InnerText(td[5]), " ")}
frsentries[frs.QRZ] = frs
} else if re.MatchString(htmlquery.InnerText(td[0])) {
frs := FrsEntry{
frs := Qrz{
QRZ: strings.TrimLeft(htmlquery.InnerText(td[0]), " "),
Name: sql.NullString{String: strings.TrimLeft(htmlquery.InnerText(td[1]), " "), Valid: true},
City: sql.NullString{String: strings.TrimLeft(htmlquery.InnerText(td[2]), " "), Valid: true},
Dept: sql.NullString{String: strings.TrimLeft(htmlquery.InnerText(td[3]), " "), Valid: true},
Country: sql.NullString{String: strings.TrimLeft(htmlquery.InnerText(td[4]), " "), Valid: true}}
Name: strings.TrimLeft(htmlquery.InnerText(td[1]), " "),
City: strings.TrimLeft(htmlquery.InnerText(td[2]), " "),
Dept: strings.TrimLeft(htmlquery.InnerText(td[3]), " "),
Country: strings.TrimLeft(htmlquery.InnerText(td[4]), " ")}
frsentries[frs.QRZ] = frs
}
}
@ -123,18 +120,14 @@ func getFrsEntries(config config.Config, body string) (frsentries map[string]Frs
}
// getCurrentEntries fetches existing entries from the database
func getCurrentEntries(config config.Config) (existingQRZ []FrsEntry, err error) {
query := `
SELECT *
FROM %s;
`
err = config.Db.Select(&existingQRZ, fmt.Sprintf(query, config.DbTable))
func getCurrentEntries(config config.Config) (qrz []Qrz, err error) {
err = config.Db.Find(&qrz)
return
}
// discardExistingEntries removes existing entries from the original map[string]FrsEntry
func discardExistingEntries(config config.Config, frsPeople *map[string]FrsEntry, existingQRZ []FrsEntry) (err error) {
// discardExistingEntries removes existing entries from the original map[string]Qrz
func discardExistingEntries(config config.Config, frsPeople *map[string]Qrz, existingQRZ []Qrz) (err error) {
for _, entry := range existingQRZ {
delete(*frsPeople, entry.QRZ)
}
@ -143,18 +136,29 @@ func discardExistingEntries(config config.Config, frsPeople *map[string]FrsEntry
}
// insertFrsEntryToDB inserts FRS entries into the database
func insertFrsEntryToDB(config config.Config, frsPeople map[string]FrsEntry) (err error) {
tx := config.Db.MustBegin()
func insertFrsEntryToDB(config config.Config, frsPeople map[string]Qrz) (err error) {
session := config.Db.NewSession()
defer session.Close()
    if err = session.Begin(); err != nil {
        return err
    }
    var qrzNum int
    fmt.Printf("Starting inserts of %d entries\n", len(frsPeople))
for _, frs := range frsPeople {
tx.MustExec(config.DbStatements.Insert, frs.QRZ, frs.DMRID, frs.Name, frs.City, frs.Dept, frs.Country)
fmt.Println(frs)
    if _, err = session.Insert(&frs); err != nil {
        return err
    }
qrzNum++
    // Commit in batches of five, then open a new transaction for the next batch.
    if (qrzNum % 5) == 0 {
        if err = session.Commit(); err != nil {
            return err
        }
        if err = session.Begin(); err != nil {
            return err
        }
    }
}
err = tx.Commit()
err = session.Commit()
if err != nil {
return err
}
@ -163,30 +167,15 @@ func insertFrsEntryToDB(config config.Config, frsPeople map[string]FrsEntry) (er
return nil
}
// FrsEntry describes FRS people
type FrsEntry struct {
ID int `db:"id"`
QRZ string `db:"qrz"`
DMRID sql.NullString `db:"dmrid"`
Name sql.NullString `db:"name"`
Address sql.NullString `db:"address"`
City sql.NullString `db:"city"`
ZipCode sql.NullString `db:"zipcode"`
Dept sql.NullString `db:"dept"`
Country sql.NullString `db:"country"`
Fts sql.NullString `db:"fts"`
}
// Qrz is the structure of qrz entries
// Qrz describes FRS people
type Qrz struct {
gorm.Model
Name string
Age sql.NullInt64
Birthday *time.Time
Email string `gorm:"type:varchar(100);unique_index"`
Role string `gorm:"size:255"` // set field size to 255
MemberNumber *string `gorm:"unique;not null"` // set member number to unique and not null
Num int `gorm:"AUTO_INCREMENT"` // set num to auto incrementable
Address string `gorm:"index:addr"` // create index with name `addr` for address
IgnoreMe int `gorm:"-"` // ignore this field
ID int `db:"id" xorm:"pk autoincr"`
QRZ string `db:"qrz" xorm:"varchar(25) notnull"`
DMRID string `db:"dmrid" xorm:"varchar(25) notnull"`
Name string `db:"name" xorm:"varchar(25) notnull"`
Address string `db:"address" xorm:"varchar(50) notnull"`
City string `db:"city" xorm:"varchar(50) notnull"`
Zipcode string `db:"zipcode" xorm:"varchar(5) notnull"`
Dept string `db:"dept" xorm:"varchar(25) notnull"`
Country string `db:"country" xorm:"varchar(25) notnull"`
}

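With Qrz tagged for xorm (pk autoincr, varchar sizes, notnull), CRUD goes through the engine directly. A sketch of typical calls against this mapping, assuming an initialized engine; the helper below is illustrative, not part of the commit:

package qrz

import "xorm.io/xorm"

// exampleCRUD shows how the xorm tags above translate into queries.
func exampleCRUD(engine *xorm.Engine) error {
    q := Qrz{QRZ: "14 ABC 001", Name: "Example", Country: "France"}

    // INSERT INTO qrz (...) VALUES (...); xorm fills q.ID (pk autoincr).
    if _, err := engine.Insert(&q); err != nil {
        return err
    }

    // SELECT * FROM qrz WHERE country = ?; Find fills the slice.
    var entries []Qrz
    return engine.Where("country = ?", "France").Find(&entries)
}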

@ -2,7 +2,6 @@ package qrzws
import (
"fmt"
"log"
"net/http"
"regexp"
"strings"
@ -13,7 +12,7 @@ import (
"git.paulbsd.com/paulbsd/qrz/src/static"
"git.paulbsd.com/paulbsd/qrz/src/templates"
"github.com/gobuffalo/packr/v2"
"github.com/jmoiron/sqlx"
"github.com/labstack/echo/v4"
)
@ -41,12 +40,19 @@ func RunServer(config config.Config) (err error) {
}
return c.JSON(http.StatusOK, res)
})
e.GET("/export_frs.csv", func(c echo.Context) (err error) {
/*e.GET("/export_frs.csv", func(c echo.Context) (err error) {
data, mime, err := RunCSVExport(c, config)
if err != nil {
log.Fatalln(err)
}
return c.Blob(http.StatusOK, mime, data)
})*/
e.GET("/test", func(c echo.Context) (err error) {
res, err := Test(c, config)
if err != nil {
return c.String(http.StatusInternalServerError, fmt.Sprintf("Error 500: %d", res))
}
return c.JSON(http.StatusOK, res)
})
if !config.NoFeed {
@ -56,10 +62,17 @@ func RunServer(config config.Config) (err error) {
return
}
// Test runs a sample count query against the qrz table
func Test(c echo.Context, config config.Config) (cnt int64, err error) {
//config.Db.Where("qrz LIKE '%%' OR name LIKE '%%' OR city LIKE '%%' OR dept LIKE '%%' OR country LIKE '%%' OR dmrid LIKE '%%'").Limit(50, 2).Desc("id").Select("*").Find(&qrz)
cnt, err = config.Db.SQL("select count(*) from qrz").Count()
return
}
// Run runs the main datatable query
func Run(c echo.Context, config config.Config) (res QrzDatatableOutput, err error) {
qrzinputjson := new(QrzDatatableInput)
var count int
if err = c.Bind(qrzinputjson); err != nil {
return
@ -70,89 +83,81 @@ func Run(c echo.Context, config config.Config) (res QrzDatatableOutput, err erro
fmt.Println(err)
return
}
for rows.Next() {
var line []string
results := make(map[string]interface{})
err = rows.MapScan(results)
colslice, _ := rows.Columns()
for _, column := range colslice {
if results[column] == nil {
line = append(line, "")
} else {
line = append(line, fmt.Sprintf("%s", results[column]))
}
}
res.Data = append(res.Data, line)
count++
for _, b := range rows {
res.Data = append(res.Data, b)
}
rows.Close()
res.RecordsFiltered, err = BuildQueryCountFiltered(config, *qrzinputjson)
if err != nil {
fmt.Println(err, "filtered")
return
}
res.RecordsTotal, err = BuildQueryCountTotal(config, *qrzinputjson)
if err != nil {
fmt.Println(err, "total")
return
}
res.Draw = qrzinputjson.Draw
return
}
// BuildQuery builds and runs the main query
func BuildQuery(config config.Config, qrzdt QrzDatatableInput) (rows *sqlx.Rows, err error) {
var selectstatement, orderstatement, limitstatement, searchstatement string
selectstatement, err = SetSelectStatement(config, qrzdt)
if err != nil {
return nil, err
}
orderstatement, err = SetOrderStatement(config, qrzdt)
if err != nil {
return nil, err
}
limitstatement, err = SetLimitStatement(config, qrzdt)
if err != nil {
return nil, err
}
searchstatement, err = SetSearchLikeStatement(config, qrzdt)
func BuildQuery(config config.Config, qrzdt QrzDatatableInput) (qrz []qrz.Qrz, err error) {
selectstatement, err := SetSelect(config, qrzdt)
if err != nil {
return nil, err
}
query := `
SELECT %s
FROM %s
WHERE %s %s %s;`
orderdir, ordercol, err := SetOrder(config, qrzdt)
if err != nil {
return nil, err
}
rows, err = config.Db.Queryx(fmt.Sprintf(query, selectstatement, config.DbTable, searchstatement, orderstatement, limitstatement))
limit, offset, err := SetLimit(config, qrzdt)
if err != nil {
return nil, err
}
searchstatement, err := SetSearchLike(config, qrzdt)
if err != nil {
return nil, err
}
if orderdir == "DESC" {
err = config.Db.Where(searchstatement).Limit(limit, offset).Desc(ordercol...).Select(selectstatement).Find(&qrz)
} else if orderdir == "ASC" {
err = config.Db.Where(searchstatement).Limit(limit, offset).Asc(ordercol...).Select(selectstatement).Find(&qrz)
} else {
err = config.Db.Where(searchstatement).Limit(limit, offset).Asc("qrz").Select(selectstatement).Find(&qrz)
}
return
}
// BuildQueryCountFiltered builds the query counting filtered records
func BuildQueryCountFiltered(config config.Config, qrzdt QrzDatatableInput) (cnt int, err error) {
searchstatement, err := SetSearchLikeStatement(config, qrzdt)
query := `
SELECT COUNT(*)
FROM %s
WHERE %s;`
err = config.Db.Get(&cnt, fmt.Sprintf(query, config.DbTable, searchstatement))
func BuildQueryCountFiltered(config config.Config, qrzdt QrzDatatableInput) (cnt int64, err error) {
    searchstatement, err := SetSearchLike(config, qrzdt)
    if err != nil {
        return
    }
    cnt, err = config.Db.SQL(fmt.Sprintf("select count(*) from qrz WHERE %s", searchstatement)).Count()
return
}
// BuildQueryCountTotal builds the query counting all records
func BuildQueryCountTotal(config config.Config, qrzdt QrzDatatableInput) (cnt int, err error) {
query := `
SELECT COUNT(*) FROM %s;
`
err = config.Db.Get(&cnt, fmt.Sprintf(query, config.DbTable))
func BuildQueryCountTotal(config config.Config, qrzdt QrzDatatableInput) (cnt int64, err error) {
cnt, err = config.Db.SQL("select count(*) from qrz").Count()
return
}
// SetSelectStatement builds the SQL SELECT part of the statement
func SetSelectStatement(config config.Config, qrzdt QrzDatatableInput) (selectstatement string, err error) {
// SetSelect builds the SQL SELECT part of the statement
func SetSelect(config config.Config, qrzdt QrzDatatableInput) (selectstatement string, err error) {
var cols []string
colre := regexp.MustCompile(`^[A-Za-z0-9]+$`)
if len(qrzdt.Columns) > 0 {
for _, col := range qrzdt.Columns {
valid := colre.MatchString(col.Name)
@ -171,42 +176,45 @@ func SetSelectStatement(config config.Config, qrzdt QrzDatatableInput) (selectst
return
}
// SetOrderStatement builds the SQL ORDER BY part of the statement
func SetOrderStatement(config config.Config, qrzdt QrzDatatableInput) (orderstmt string, err error) {
var orderstmts []string
// SetOrder builds the SQL ORDER BY part of the statement
func SetOrder(config config.Config, qrzdt QrzDatatableInput) (orderdir string, ordercols []string, err error) {
colre := regexp.MustCompile(`^[A-Za-z0-9]+$`)
orderre := regexp.MustCompile(`^(ASC|asc|DESC|desc)$`)
for _, col := range qrzdt.Order {
if colre.MatchString(qrzdt.Columns[col.Column].Name) && orderre.MatchString(col.Dir) {
orderstmts = append(orderstmts, fmt.Sprintf("%s %s", qrzdt.Columns[col.Column].Name, col.Dir))
ordercols = append(ordercols, qrzdt.Columns[col.Column].Name)
orderdir = col.Dir
} else {
err = fmt.Errorf("Order statements does not match prerequisites")
return
}
}
if len(orderstmts) > 0 {
orderstmt = fmt.Sprintf("ORDER BY %s", strings.Join(orderstmts, ", "))
}
return
}
// SetLimitStatement builds the SQL LIMIT part of the statement
func SetLimitStatement(config config.Config, qrzdt QrzDatatableInput) (limitstmt string, err error) {
// SetLimit builds the SQL limit and offset values
func SetLimit(config config.Config, qrzdt QrzDatatableInput) (limit int, offset int, err error) {
intre := regexp.MustCompile(`^[0-9]+$`)
if qrzdt.Length < 1 {
qrzdt.Length = 50
}
if intre.MatchString(fmt.Sprintf("%d", qrzdt.Length)) && intre.MatchString(fmt.Sprintf("%d", qrzdt.Start)) {
limitstmt = fmt.Sprintf("LIMIT %d OFFSET %d", qrzdt.Length, qrzdt.Start)
limit = qrzdt.Length
offset = qrzdt.Start
} else {
err = fmt.Errorf("Limit statements does not match prerequisites")
return
}
return
}
// SetSearchLikeStatement builds the WHERE clause of the SQL statement
func SetSearchLikeStatement(config config.Config, qrzdt QrzDatatableInput) (searchstmt string, err error) {
// SetSearchLike builds the WHERE clause of the SQL statement
func SetSearchLike(config config.Config, qrzdt QrzDatatableInput) (searchstmt string, err error) {
var searchstmtslice []string
if len(qrzdt.Columns) > 0 {
for id, i := range qrzdt.Columns {
@ -219,24 +227,17 @@ func SetSearchLikeStatement(config config.Config, qrzdt QrzDatatableInput) (sear
} else {
searchstmtslice = []string{"1=1"}
}
searchstmt = strings.Join(searchstmtslice, " ")
return
}
// SetSearchFTSStatement builds the full-text-search WHERE clause of the SQL statement
func SetSearchFTSStatement(config config.Config, qrzdt QrzDatatableInput) (searchstmt string, err error) {
if len(qrzdt.Columns) > 0 {
searchstmt = fmt.Sprintf("fts @@ plainto_tsquery('%s')", qrzdt.Search.Value)
} else {
searchstmt = "1=1"
}
return
}
/*
// RunCSVExport runs the main loop
func RunCSVExport(c echo.Context, config config.Config) (data []byte, mime string, err error) {
mime = "text/csv"
rows, err := config.Db.Queryx(config.DbStatements.ExportCSV)
res, err := config.Db.Exec(config.DbStatements.ExportCSV)
var res []string
res = append(res, "QRZ,Name,City,Dept,Country")
for rows.Next() {
@ -258,6 +259,7 @@ func RunCSVExport(c echo.Context, config config.Config) (data []byte, mime strin
return
}
*/
// QrzDatatableInput describes datatable query
type QrzDatatableInput struct {
@ -286,8 +288,8 @@ type QrzDatatableInput struct {
// QrzDatatableOutput describes datatable response
type QrzDatatableOutput struct {
Data [][]string `json:"data"`
Draw int `json:"draw"`
RecordsTotal int `json:"recordsTotal"`
RecordsFiltered int `json:"recordsFiltered"`
Data []qrz.Qrz `json:"data"`
Draw int `json:"draw"`
RecordsTotal int64 `json:"recordsTotal"`
RecordsFiltered int64 `json:"recordsFiltered"`
}

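For reference, a sketch of the call chain BuildQuery assembles and the SQL I would expect xorm to generate for it (my reading of the API, not output captured from this code):

package qrzws

import (
    "git.paulbsd.com/paulbsd/qrz/src/qrz"
    "xorm.io/xorm"
)

// exampleQuery mirrors the DESC branch of BuildQuery. Expected SQL, roughly:
//   SELECT qrz, name FROM qrz WHERE name LIKE '%x%' ORDER BY qrz DESC LIMIT 50 OFFSET 0
func exampleQuery(engine *xorm.Engine) ([]qrz.Qrz, error) {
    var rows []qrz.Qrz
    err := engine.Where("name LIKE ?", "%x%").
        Limit(50, 0). // Limit(count, offset), matching SetLimit above
        Desc("qrz").
        Select("qrz, name").
        Find(&rows)
    return rows, err
}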
vendor/github.com/golang/snappy/.gitignore | 16 (generated, vendored, new file)

@ -0,0 +1,16 @@
cmd/snappytool/snappytool
testdata/bench
# These explicitly listed benchmark data files are for an obsolete version of
# snappy_test.go.
testdata/alice29.txt
testdata/asyoulik.txt
testdata/fireworks.jpeg
testdata/geo.protodata
testdata/html
testdata/html_x_4
testdata/kppkn.gtb
testdata/lcet10.txt
testdata/paper-100k.pdf
testdata/plrabn12.txt
testdata/urls.10K

vendor/github.com/golang/snappy/AUTHORS | 15 (generated, vendored, new file)

@ -0,0 +1,15 @@
# This is the official list of Snappy-Go authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as
# Name or Organization <email address>
# The email address is not required for organizations.
# Please keep the list sorted.
Damian Gryski <dgryski@gmail.com>
Google Inc.
Jan Mercl <0xjnml@gmail.com>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Sebastien Binet <seb.binet@gmail.com>

vendor/github.com/golang/snappy/CONTRIBUTORS | 37 (generated, vendored, new file)

@ -0,0 +1,37 @@
# This is the official list of people who can contribute
# (and typically have contributed) code to the Snappy-Go repository.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# The submission process automatically checks to make sure
# that people submitting code are listed in this file (by email address).
#
# Names should be added to this file only after verifying that
# the individual or the individual's organization has agreed to
# the appropriate Contributor License Agreement, found here:
#
# http://code.google.com/legal/individual-cla-v1.0.html
# http://code.google.com/legal/corporate-cla-v1.0.html
#
# The agreement for individuals can be filled out on the web.
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file, depending on whether the
# individual or corporate CLA was used.
# Names should be added to this file like so:
# Name <email address>
# Please keep the list sorted.
Damian Gryski <dgryski@gmail.com>
Jan Mercl <0xjnml@gmail.com>
Kai Backman <kaib@golang.org>
Marc-Antoine Ruel <maruel@chromium.org>
Nigel Tao <nigeltao@golang.org>
Rob Pike <r@golang.org>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Russ Cox <rsc@golang.org>
Sebastien Binet <seb.binet@gmail.com>

vendor/github.com/golang/snappy/LICENSE | 27 (generated, vendored, new file)

@ -0,0 +1,27 @@
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/golang/snappy/README | 107 (generated, vendored, new file)

@ -0,0 +1,107 @@
The Snappy compression format in the Go programming language.
To download and install from source:
$ go get github.com/golang/snappy
Unless otherwise noted, the Snappy-Go source files are distributed
under the BSD-style license found in the LICENSE file.
Benchmarks.
The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
or so files, the same set used by the C++ Snappy code (github.com/google/snappy
and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
"go test -test.bench=."
_UFlat0-8 2.19GB/s ± 0% html
_UFlat1-8 1.41GB/s ± 0% urls
_UFlat2-8 23.5GB/s ± 2% jpg
_UFlat3-8 1.91GB/s ± 0% jpg_200
_UFlat4-8 14.0GB/s ± 1% pdf
_UFlat5-8 1.97GB/s ± 0% html4
_UFlat6-8 814MB/s ± 0% txt1
_UFlat7-8 785MB/s ± 0% txt2
_UFlat8-8 857MB/s ± 0% txt3
_UFlat9-8 719MB/s ± 1% txt4
_UFlat10-8 2.84GB/s ± 0% pb
_UFlat11-8 1.05GB/s ± 0% gaviota
_ZFlat0-8 1.04GB/s ± 0% html
_ZFlat1-8 534MB/s ± 0% urls
_ZFlat2-8 15.7GB/s ± 1% jpg
_ZFlat3-8 740MB/s ± 3% jpg_200
_ZFlat4-8 9.20GB/s ± 1% pdf
_ZFlat5-8 991MB/s ± 0% html4
_ZFlat6-8 379MB/s ± 0% txt1
_ZFlat7-8 352MB/s ± 0% txt2
_ZFlat8-8 396MB/s ± 1% txt3
_ZFlat9-8 327MB/s ± 1% txt4
_ZFlat10-8 1.33GB/s ± 1% pb
_ZFlat11-8 605MB/s ± 1% gaviota
"go test -test.bench=. -tags=noasm"
_UFlat0-8 621MB/s ± 2% html
_UFlat1-8 494MB/s ± 1% urls
_UFlat2-8 23.2GB/s ± 1% jpg
_UFlat3-8 1.12GB/s ± 1% jpg_200
_UFlat4-8 4.35GB/s ± 1% pdf
_UFlat5-8 609MB/s ± 0% html4
_UFlat6-8 296MB/s ± 0% txt1
_UFlat7-8 288MB/s ± 0% txt2
_UFlat8-8 309MB/s ± 1% txt3
_UFlat9-8 280MB/s ± 1% txt4
_UFlat10-8 753MB/s ± 0% pb
_UFlat11-8 400MB/s ± 0% gaviota
_ZFlat0-8 409MB/s ± 1% html
_ZFlat1-8 250MB/s ± 1% urls
_ZFlat2-8 12.3GB/s ± 1% jpg
_ZFlat3-8 132MB/s ± 0% jpg_200
_ZFlat4-8 2.92GB/s ± 0% pdf
_ZFlat5-8 405MB/s ± 1% html4
_ZFlat6-8 179MB/s ± 1% txt1
_ZFlat7-8 170MB/s ± 1% txt2
_ZFlat8-8 189MB/s ± 1% txt3
_ZFlat9-8 164MB/s ± 1% txt4
_ZFlat10-8 479MB/s ± 1% pb
_ZFlat11-8 270MB/s ± 1% gaviota
For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
are the numbers from C++ Snappy's
make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
BM_UFlat/0 2.4GB/s html
BM_UFlat/1 1.4GB/s urls
BM_UFlat/2 21.8GB/s jpg
BM_UFlat/3 1.5GB/s jpg_200
BM_UFlat/4 13.3GB/s pdf
BM_UFlat/5 2.1GB/s html4
BM_UFlat/6 1.0GB/s txt1
BM_UFlat/7 959.4MB/s txt2
BM_UFlat/8 1.0GB/s txt3
BM_UFlat/9 864.5MB/s txt4
BM_UFlat/10 2.9GB/s pb
BM_UFlat/11 1.2GB/s gaviota
BM_ZFlat/0 944.3MB/s html (22.31 %)
BM_ZFlat/1 501.6MB/s urls (47.78 %)
BM_ZFlat/2 14.3GB/s jpg (99.95 %)
BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %)
BM_ZFlat/4 8.3GB/s pdf (83.30 %)
BM_ZFlat/5 903.5MB/s html4 (22.52 %)
BM_ZFlat/6 336.0MB/s txt1 (57.88 %)
BM_ZFlat/7 312.3MB/s txt2 (61.91 %)
BM_ZFlat/8 353.1MB/s txt3 (54.99 %)
BM_ZFlat/9 289.9MB/s txt4 (66.26 %)
BM_ZFlat/10 1.2GB/s pb (19.68 %)
BM_ZFlat/11 527.4MB/s gaviota (37.72 %)

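As a quick orientation for this vendored package, a round-trip through the block-format API (standard golang/snappy usage, not code from this repository):

package main

import (
    "fmt"

    "github.com/golang/snappy"
)

func main() {
    src := []byte("hello hello hello hello") // repetitive input compresses well
    compressed := snappy.Encode(nil, src)    // nil dst lets Encode allocate

    decompressed, err := snappy.Decode(nil, compressed)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%d -> %d bytes, round-trip ok: %v\n",
        len(src), len(compressed), string(decompressed) == string(src))
}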
vendor/github.com/golang/snappy/decode.go | 237 (generated, vendored, new file)

@ -0,0 +1,237 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"errors"
"io"
)
var (
// ErrCorrupt reports that the input is invalid.
ErrCorrupt = errors.New("snappy: corrupt input")
// ErrTooLarge reports that the uncompressed length is too large.
ErrTooLarge = errors.New("snappy: decoded block is too large")
// ErrUnsupported reports that the input isn't supported.
ErrUnsupported = errors.New("snappy: unsupported input")
errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
)
// DecodedLen returns the length of the decoded block.
func DecodedLen(src []byte) (int, error) {
v, _, err := decodedLen(src)
return v, err
}
// decodedLen returns the length of the decoded block and the number of bytes
// that the length header occupied.
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
v, n := binary.Uvarint(src)
if n <= 0 || v > 0xffffffff {
return 0, 0, ErrCorrupt
}
const wordSize = 32 << (^uint(0) >> 32 & 1)
if wordSize == 32 && v > 0x7fffffff {
return 0, 0, ErrTooLarge
}
return int(v), n, nil
}
const (
decodeErrCodeCorrupt = 1
decodeErrCodeUnsupportedLiteralLength = 2
)
// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
func Decode(dst, src []byte) ([]byte, error) {
dLen, s, err := decodedLen(src)
if err != nil {
return nil, err
}
if dLen <= len(dst) {
dst = dst[:dLen]
} else {
dst = make([]byte, dLen)
}
switch decode(dst, src[s:]) {
case 0:
return dst, nil
case decodeErrCodeUnsupportedLiteralLength:
return nil, errUnsupportedLiteralLength
}
return nil, ErrCorrupt
}
// NewReader returns a new Reader that decompresses from r, using the framing
// format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
func NewReader(r io.Reader) *Reader {
return &Reader{
r: r,
decoded: make([]byte, maxBlockSize),
buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
}
}
// Reader is an io.Reader that can read Snappy-compressed bytes.
type Reader struct {
r io.Reader
err error
decoded []byte
buf []byte
// decoded[i:j] contains decoded bytes that have not yet been passed on.
i, j int
readHeader bool
}
// Reset discards any buffered data, resets all state, and switches the Snappy
// reader to read from r. This permits reusing a Reader rather than allocating
// a new one.
func (r *Reader) Reset(reader io.Reader) {
r.r = reader
r.err = nil
r.i = 0
r.j = 0
r.readHeader = false
}
func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
if _, r.err = io.ReadFull(r.r, p); r.err != nil {
if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
r.err = ErrCorrupt
}
return false
}
return true
}
// Read satisfies the io.Reader interface.
func (r *Reader) Read(p []byte) (int, error) {
if r.err != nil {
return 0, r.err
}
for {
if r.i < r.j {
n := copy(p, r.decoded[r.i:r.j])
r.i += n
return n, nil
}
if !r.readFull(r.buf[:4], true) {
return 0, r.err
}
chunkType := r.buf[0]
if !r.readHeader {
if chunkType != chunkTypeStreamIdentifier {
r.err = ErrCorrupt
return 0, r.err
}
r.readHeader = true
}
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
if chunkLen > len(r.buf) {
r.err = ErrUnsupported
return 0, r.err
}
// The chunk types are specified at
// https://github.com/google/snappy/blob/master/framing_format.txt
switch chunkType {
case chunkTypeCompressedData:
// Section 4.2. Compressed data (chunk type 0x00).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
}
buf := r.buf[:chunkLen]
if !r.readFull(buf, false) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
buf = buf[checksumSize:]
n, err := DecodedLen(buf)
if err != nil {
r.err = err
return 0, r.err
}
if n > len(r.decoded) {
r.err = ErrCorrupt
return 0, r.err
}
if _, err := Decode(r.decoded, buf); err != nil {
r.err = err
return 0, r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
return 0, r.err
}
r.i, r.j = 0, n
continue
case chunkTypeUncompressedData:
// Section 4.3. Uncompressed data (chunk type 0x01).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
}
buf := r.buf[:checksumSize]
if !r.readFull(buf, false) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
// Read directly into r.decoded instead of via r.buf.
n := chunkLen - checksumSize
if n > len(r.decoded) {
r.err = ErrCorrupt
return 0, r.err
}
if !r.readFull(r.decoded[:n], false) {
return 0, r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
return 0, r.err
}
r.i, r.j = 0, n
continue
case chunkTypeStreamIdentifier:
// Section 4.1. Stream identifier (chunk type 0xff).
if chunkLen != len(magicBody) {
r.err = ErrCorrupt
return 0, r.err
}
if !r.readFull(r.buf[:len(magicBody)], false) {
return 0, r.err
}
for i := 0; i < len(magicBody); i++ {
if r.buf[i] != magicBody[i] {
r.err = ErrCorrupt
return 0, r.err
}
}
continue
}
if chunkType <= 0x7f {
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
r.err = ErrUnsupported
return 0, r.err
}
// Section 4.4. Padding (chunk type 0xfe).
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
if !r.readFull(r.buf[:chunkLen], false) {
return 0, r.err
}
}
}
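
The block half of the API is now complete: DecodedLen reads only the varint header, Decode inflates a whole block, and Encode (added in encode.go below) produces one. A minimal round-trip sketch, assuming the vendored import path is used directly:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/golang/snappy"
)

func main() {
	src := []byte("hello hello hello hello hello")

	// A nil dst asks Encode to allocate a large-enough buffer.
	block := snappy.Encode(nil, src)

	// DecodedLen parses only the varint length header at the front.
	n, err := snappy.DecodedLen(block)
	if err != nil {
		log.Fatal(err)
	}

	// Decode reuses dst when it is large enough, per the comment above.
	dst, err := snappy.Decode(make([]byte, n), block)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(dst, src)) // true
}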

vendor/github.com/golang/snappy/decode_amd64.go generated vendored Normal file
@@ -0,0 +1,14 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
package snappy
// decode has the same semantics as in decode_other.go.
//
//go:noescape
func decode(dst, src []byte) int

vendor/github.com/golang/snappy/decode_amd64.s generated vendored Normal file
@@ -0,0 +1,490 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
// The asm code generally follows the pure Go code in decode_other.go, except
// where marked with a "!!!".
// func decode(dst, src []byte) int
//
// All local variables fit into registers. The non-zero stack size is only to
// spill registers and push args when issuing a CALL. The register allocation:
// - AX scratch
// - BX scratch
// - CX length or x
// - DX offset
// - SI &src[s]
// - DI &dst[d]
// + R8 dst_base
// + R9 dst_len
// + R10 dst_base + dst_len
// + R11 src_base
// + R12 src_len
// + R13 src_base + src_len
// - R14 used by doCopy
// - R15 used by doCopy
//
// The registers R8-R13 (marked with a "+") are set at the start of the
// function, and after a CALL returns, and are not otherwise modified.
//
// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
TEXT ·decode(SB), NOSPLIT, $48-56
// Initialize SI, DI and R8-R13.
MOVQ dst_base+0(FP), R8
MOVQ dst_len+8(FP), R9
MOVQ R8, DI
MOVQ R8, R10
ADDQ R9, R10
MOVQ src_base+24(FP), R11
MOVQ src_len+32(FP), R12
MOVQ R11, SI
MOVQ R11, R13
ADDQ R12, R13
loop:
// for s < len(src)
CMPQ SI, R13
JEQ end
// CX = uint32(src[s])
//
// switch src[s] & 0x03
MOVBLZX (SI), CX
MOVL CX, BX
ANDL $3, BX
CMPL BX, $1
JAE tagCopy
// ----------------------------------------
// The code below handles literal tags.
// case tagLiteral:
// x := uint32(src[s] >> 2)
// switch
SHRL $2, CX
CMPL CX, $60
JAE tagLit60Plus
// case x < 60:
// s++
INCQ SI
doLit:
// This is the end of the inner "switch", when we have a literal tag.
//
// We assume that CX == x and x fits in a uint32, where x is the variable
// used in the pure Go decode_other.go code.
// length = int(x) + 1
//
// Unlike the pure Go code, we don't need to check if length <= 0 because
// CX can hold 64 bits, so the increment cannot overflow.
INCQ CX
// Prepare to check if copying length bytes will run past the end of dst or
// src.
//
// AX = len(dst) - d
// BX = len(src) - s
MOVQ R10, AX
SUBQ DI, AX
MOVQ R13, BX
SUBQ SI, BX
// !!! Try a faster technique for short (16 or fewer bytes) copies.
//
// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
// goto callMemmove // Fall back on calling runtime·memmove.
// }
//
// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
// against 21 instead of 16, because it cannot assume that all of its input
// is contiguous in memory and so it needs to leave enough source bytes to
// read the next tag without refilling buffers, but Go's Decode assumes
// contiguousness (the src argument is a []byte).
CMPQ CX, $16
JGT callMemmove
CMPQ AX, $16
JLT callMemmove
CMPQ BX, $16
JLT callMemmove
// !!! Implement the copy from src to dst as a 16-byte load and store.
// (Decode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only length bytes, but that's
// OK. If the input is a valid Snappy encoding then subsequent iterations
// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
// non-nil error), so the overrun will be ignored.
//
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
MOVOU 0(SI), X0
MOVOU X0, 0(DI)
// d += length
// s += length
ADDQ CX, DI
ADDQ CX, SI
JMP loop
callMemmove:
// if length > len(dst)-d || length > len(src)-s { etc }
CMPQ CX, AX
JGT errCorrupt
CMPQ CX, BX
JGT errCorrupt
// copy(dst[d:], src[s:s+length])
//
// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
// DI, SI and CX as arguments. Coincidentally, we also need to spill those
// three registers to the stack, to save local variables across the CALL.
MOVQ DI, 0(SP)
MOVQ SI, 8(SP)
MOVQ CX, 16(SP)
MOVQ DI, 24(SP)
MOVQ SI, 32(SP)
MOVQ CX, 40(SP)
CALL runtime·memmove(SB)
// Restore local variables: unspill registers from the stack and
// re-calculate R8-R13.
MOVQ 24(SP), DI
MOVQ 32(SP), SI
MOVQ 40(SP), CX
MOVQ dst_base+0(FP), R8
MOVQ dst_len+8(FP), R9
MOVQ R8, R10
ADDQ R9, R10
MOVQ src_base+24(FP), R11
MOVQ src_len+32(FP), R12
MOVQ R11, R13
ADDQ R12, R13
// d += length
// s += length
ADDQ CX, DI
ADDQ CX, SI
JMP loop
tagLit60Plus:
// !!! This fragment does the
//
// s += x - 58; if uint(s) > uint(len(src)) { etc }
//
// checks. In the asm version, we code it once instead of once per switch case.
ADDQ CX, SI
SUBQ $58, SI
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// case x == 60:
CMPL CX, $61
JEQ tagLit61
JA tagLit62Plus
// x = uint32(src[s-1])
MOVBLZX -1(SI), CX
JMP doLit
tagLit61:
// case x == 61:
// x = uint32(src[s-2]) | uint32(src[s-1])<<8
MOVWLZX -2(SI), CX
JMP doLit
tagLit62Plus:
CMPL CX, $62
JA tagLit63
// case x == 62:
// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
MOVWLZX -3(SI), CX
MOVBLZX -1(SI), BX
SHLL $16, BX
ORL BX, CX
JMP doLit
tagLit63:
// case x == 63:
// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
MOVL -4(SI), CX
JMP doLit
// The code above handles literal tags.
// ----------------------------------------
// The code below handles copy tags.
tagCopy4:
// case tagCopy4:
// s += 5
ADDQ $5, SI
// if uint(s) > uint(len(src)) { etc }
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// length = 1 + int(src[s-5])>>2
SHRQ $2, CX
INCQ CX
// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
MOVLQZX -4(SI), DX
JMP doCopy
tagCopy2:
// case tagCopy2:
// s += 3
ADDQ $3, SI
// if uint(s) > uint(len(src)) { etc }
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// length = 1 + int(src[s-3])>>2
SHRQ $2, CX
INCQ CX
// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
MOVWQZX -2(SI), DX
JMP doCopy
tagCopy:
// We have a copy tag. We assume that:
// - BX == src[s] & 0x03
// - CX == src[s]
CMPQ BX, $2
JEQ tagCopy2
JA tagCopy4
// case tagCopy1:
// s += 2
ADDQ $2, SI
// if uint(s) > uint(len(src)) { etc }
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
MOVQ CX, DX
ANDQ $0xe0, DX
SHLQ $3, DX
MOVBQZX -1(SI), BX
ORQ BX, DX
// length = 4 + int(src[s-2])>>2&0x7
SHRQ $2, CX
ANDQ $7, CX
ADDQ $4, CX
doCopy:
// This is the end of the outer "switch", when we have a copy tag.
//
// We assume that:
// - CX == length && CX > 0
// - DX == offset
// if offset <= 0 { etc }
CMPQ DX, $0
JLE errCorrupt
// if d < offset { etc }
MOVQ DI, BX
SUBQ R8, BX
CMPQ BX, DX
JLT errCorrupt
// if length > len(dst)-d { etc }
MOVQ R10, BX
SUBQ DI, BX
CMPQ CX, BX
JGT errCorrupt
// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
//
// Set:
// - R14 = len(dst)-d
// - R15 = &dst[d-offset]
MOVQ R10, R14
SUBQ DI, R14
MOVQ DI, R15
SUBQ DX, R15
// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
//
// First, try using two 8-byte load/stores, similar to the doLit technique
// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
// and not one 16-byte load/store, and the first store has to be before the
// second load, due to the overlap if offset is in the range [8, 16).
//
// if length > 16 || offset < 8 || len(dst)-d < 16 {
// goto slowForwardCopy
// }
// copy 16 bytes
// d += length
CMPQ CX, $16
JGT slowForwardCopy
CMPQ DX, $8
JLT slowForwardCopy
CMPQ R14, $16
JLT slowForwardCopy
MOVQ 0(R15), AX
MOVQ AX, 0(DI)
MOVQ 8(R15), BX
MOVQ BX, 8(DI)
ADDQ CX, DI
JMP loop
slowForwardCopy:
// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
// can still try 8-byte load stores, provided we can overrun up to 10 extra
// bytes. As above, the overrun will be fixed up by subsequent iterations
// of the outermost loop.
//
// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
// commentary says:
//
// ----
//
// The main part of this loop is a simple copy of eight bytes at a time
// until we've copied (at least) the requested amount of bytes. However,
// if d and d-offset are less than eight bytes apart (indicating a
// repeating pattern of length < 8), we first need to expand the pattern in
// order to get the correct results. For instance, if the buffer looks like
// this, with the eight-byte <d-offset> and <d> patterns marked as
// intervals:
//
// abxxxxxxxxxxxx
// [------] d-offset
// [------] d
//
// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
// once, after which we can move <d> two bytes without moving <d-offset>:
//
// ababxxxxxxxxxx
// [------] d-offset
// [------] d
//
// and repeat the exercise until the two no longer overlap.
//
// This allows us to do very well in the special case of one single byte
// repeated many times, without taking a big hit for more general cases.
//
// The worst case of extra writing past the end of the match occurs when
// offset == 1 and length == 1; the last copy will read from byte positions
// [0..7] and write to [4..11], whereas it was only supposed to write to
// position 1. Thus, ten excess bytes.
//
// ----
//
// That "10 byte overrun" worst case is confirmed by Go's
// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
// and finishSlowForwardCopy algorithm.
//
// if length > len(dst)-d-10 {
// goto verySlowForwardCopy
// }
SUBQ $10, R14
CMPQ CX, R14
JGT verySlowForwardCopy
makeOffsetAtLeast8:
// !!! As above, expand the pattern so that offset >= 8 and we can use
// 8-byte load/stores.
//
// for offset < 8 {
// copy 8 bytes from dst[d-offset:] to dst[d:]
// length -= offset
// d += offset
// offset += offset
// // The two previous lines together mean that d-offset, and therefore
// // R15, is unchanged.
// }
CMPQ DX, $8
JGE fixUpSlowForwardCopy
MOVQ (R15), BX
MOVQ BX, (DI)
SUBQ DX, CX
ADDQ DX, DI
ADDQ DX, DX
JMP makeOffsetAtLeast8
fixUpSlowForwardCopy:
// !!! Add length (which might be negative now) to d (implied by DI being
// &dst[d]) so that d ends up at the right place when we jump back to the
// top of the loop. Before we do that, though, we save DI to AX so that, if
// length is positive, copying the remaining length bytes will write to the
// right place.
MOVQ DI, AX
ADDQ CX, DI
finishSlowForwardCopy:
// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
// length means that we overrun, but as above, that will be fixed up by
// subsequent iterations of the outermost loop.
CMPQ CX, $0
JLE loop
MOVQ (R15), BX
MOVQ BX, (AX)
ADDQ $8, R15
ADDQ $8, AX
SUBQ $8, CX
JMP finishSlowForwardCopy
verySlowForwardCopy:
// verySlowForwardCopy is a simple implementation of forward copy. In C
// parlance, this is a do/while loop instead of a while loop, since we know
// that length > 0. In Go syntax:
//
// for {
// dst[d] = dst[d - offset]
// d++
// length--
// if length == 0 {
// break
// }
// }
MOVB (R15), BX
MOVB BX, (DI)
INCQ R15
INCQ DI
DECQ CX
JNZ verySlowForwardCopy
JMP loop
// The code above handles copy tags.
// ----------------------------------------
end:
// This is the end of the "for s < len(src)".
//
// if d != len(dst) { etc }
CMPQ DI, R10
JNE errCorrupt
// return 0
MOVQ $0, ret+48(FP)
RET
errCorrupt:
// return decodeErrCodeCorrupt
MOVQ $1, ret+48(FP)
RET
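
The slowForwardCopy path is the subtle part of this file: when offset < 8, an overlapping copy repeats a short pattern, so the assembly first doubles the pattern until offset >= 8 (makeOffsetAtLeast8) and then streams 8-byte load/stores with a deliberate, later-fixed-up overrun. A pure-Go sketch of the doubling idea, for illustration only (the byte-at-a-time loop in decode_other.go below is the reference behavior):

package main

import "fmt"

// expandAndCopy doubles the repeating pattern until its period is at least 8,
// mirroring makeOffsetAtLeast8, then finishes with the in-bounds forward byte
// copy that decode_other.go uses. Not part of the package.
func expandAndCopy(dst []byte, d, offset, length int) int {
	for offset < 8 && length > offset {
		// dst[d-offset:d] holds one period; copying it forward doubles
		// the period while d-offset stays fixed.
		copy(dst[d:d+offset], dst[d-offset:d])
		d += offset
		length -= offset
		offset += offset
	}
	// The assembly would now overrun by up to 10 bytes with 8-byte stores;
	// plain Go stays in bounds instead.
	for end := d + length; d != end; d++ {
		dst[d] = dst[d-offset]
	}
	return d
}

func main() {
	dst := []byte("ab..............")
	expandAndCopy(dst, 2, 2, 12)  // copy 12 bytes from 2 bytes back
	fmt.Println(string(dst[:14])) // ababababababab
}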

vendor/github.com/golang/snappy/decode_other.go generated vendored Normal file
@@ -0,0 +1,101 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 appengine !gc noasm
package snappy
// decode writes the decoding of src to dst. It assumes that the varint-encoded
// length of the decompressed bytes has already been read, and that len(dst)
// equals that length.
//
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
func decode(dst, src []byte) int {
var d, s, offset, length int
for s < len(src) {
switch src[s] & 0x03 {
case tagLiteral:
x := uint32(src[s] >> 2)
switch {
case x < 60:
s++
case x == 60:
s += 2
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-1])
case x == 61:
s += 3
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-2]) | uint32(src[s-1])<<8
case x == 62:
s += 4
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
case x == 63:
s += 5
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
}
length = int(x) + 1
if length <= 0 {
return decodeErrCodeUnsupportedLiteralLength
}
if length > len(dst)-d || length > len(src)-s {
return decodeErrCodeCorrupt
}
copy(dst[d:], src[s:s+length])
d += length
s += length
continue
case tagCopy1:
s += 2
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = 4 + int(src[s-2])>>2&0x7
offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
case tagCopy2:
s += 3
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = 1 + int(src[s-3])>>2
offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
case tagCopy4:
s += 5
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = 1 + int(src[s-5])>>2
offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
}
if offset <= 0 || d < offset || length > len(dst)-d {
return decodeErrCodeCorrupt
}
// Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
// the built-in copy function, this byte-by-byte copy always runs
// forwards, even if the slices overlap. Conceptually, this is:
//
// d += forwardCopy(dst[d:d+length], dst[d-offset:])
for end := d + length; d != end; d++ {
dst[d] = dst[d-offset]
}
}
if d != len(dst) {
return decodeErrCodeCorrupt
}
return 0
}

vendor/github.com/golang/snappy/encode.go generated vendored Normal file
@@ -0,0 +1,285 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"errors"
"io"
)
// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
func Encode(dst, src []byte) []byte {
if n := MaxEncodedLen(len(src)); n < 0 {
panic(ErrTooLarge)
} else if len(dst) < n {
dst = make([]byte, n)
}
// The block starts with the varint-encoded length of the decompressed bytes.
d := binary.PutUvarint(dst, uint64(len(src)))
for len(src) > 0 {
p := src
src = nil
if len(p) > maxBlockSize {
p, src = p[:maxBlockSize], p[maxBlockSize:]
}
if len(p) < minNonLiteralBlockSize {
d += emitLiteral(dst[d:], p)
} else {
d += encodeBlock(dst[d:], p)
}
}
return dst[:d]
}
// inputMargin is the minimum number of extra input bytes to keep, inside
// encodeBlock's inner loop. On some architectures, this margin lets us
// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
// literals can be implemented as a single load to and store from a 16-byte
// register. That literal's actual length can be as short as 1 byte, so this
// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
// the encoding loop will fix up the copy overrun, and this inputMargin ensures
// that we don't overrun the dst and src buffers.
const inputMargin = 16 - 1
// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
// could be encoded with a copy tag. This is the minimum with respect to the
// algorithm used by encodeBlock, not a minimum enforced by the file format.
//
// The encoded output must start with at least a 1 byte literal, as there are
// no previous bytes to copy. A minimal (1 byte) copy after that, generated
// from an emitCopy call in encodeBlock's main loop, would require at least
// another inputMargin bytes, for the reason above: we want any emitLiteral
// calls inside encodeBlock's main loop to use the fast path if possible, which
// requires being able to overrun by inputMargin bytes. Thus,
// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
//
// The C++ code doesn't use this exact threshold, but it could, as discussed at
// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
// optimization. It should not affect the encoded form. This is tested by
// TestSameEncodingAsCppShortCopies.
const minNonLiteralBlockSize = 1 + 1 + inputMargin
// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
//
// It will return a negative value if srcLen is too large to encode.
func MaxEncodedLen(srcLen int) int {
n := uint64(srcLen)
if n > 0xffffffff {
return -1
}
// Compressed data can be defined as:
// compressed := item* literal*
// item := literal* copy
//
// The trailing literal sequence has a space blowup of at most 62/60
// since a literal of length 60 needs one tag byte + one extra byte
// for length information.
//
// Item blowup is trickier to measure. Suppose the "copy" op copies
// 4 bytes of data. Because of a special check in the encoding code,
// we produce a 4-byte copy only if the offset is < 65536. Therefore
// the copy op takes 3 bytes to encode, and this type of item leads
// to at most the 62/60 blowup for representing literals.
//
// Suppose the "copy" op copies 5 bytes of data. If the offset is big
// enough, it will take 5 bytes to encode the copy op. Therefore the
// worst case here is a one-byte literal followed by a five-byte copy.
// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
//
// This last factor dominates the blowup, so the final estimate is:
n = 32 + n + n/6
if n > 0xffffffff {
return -1
}
return int(n)
}
var errClosed = errors.New("snappy: Writer is closed")
// NewWriter returns a new Writer that compresses to w.
//
// The Writer returned does not buffer writes. There is no need to Flush or
// Close such a Writer.
//
// Deprecated: the Writer returned is not suitable for many small writes, only
// for few large writes. Use NewBufferedWriter instead, which is efficient
// regardless of the frequency and shape of the writes, and remember to Close
// that Writer when done.
func NewWriter(w io.Writer) *Writer {
return &Writer{
w: w,
obuf: make([]byte, obufLen),
}
}
// NewBufferedWriter returns a new Writer that compresses to w, using the
// framing format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
//
// The Writer returned buffers writes. Users must call Close to guarantee all
// data has been forwarded to the underlying io.Writer. They may also call
// Flush zero or more times before calling Close.
func NewBufferedWriter(w io.Writer) *Writer {
return &Writer{
w: w,
ibuf: make([]byte, 0, maxBlockSize),
obuf: make([]byte, obufLen),
}
}
// Writer is an io.Writer that can write Snappy-compressed bytes.
type Writer struct {
w io.Writer
err error
// ibuf is a buffer for the incoming (uncompressed) bytes.
//
// Its use is optional. For backwards compatibility, Writers created by the
// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
// therefore do not need to be Flush'ed or Close'd.
ibuf []byte
// obuf is a buffer for the outgoing (compressed) bytes.
obuf []byte
// wroteStreamHeader is whether we have written the stream header.
wroteStreamHeader bool
}
// Reset discards the writer's state and switches the Snappy writer to write to
// w. This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(writer io.Writer) {
w.w = writer
w.err = nil
if w.ibuf != nil {
w.ibuf = w.ibuf[:0]
}
w.wroteStreamHeader = false
}
// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (nRet int, errRet error) {
if w.ibuf == nil {
// Do not buffer incoming bytes. This does not perform or compress well
// if the caller of Writer.Write writes many small slices. This
// behavior is therefore deprecated, but still supported for backwards
// compatibility with code that doesn't explicitly Flush or Close.
return w.write(p)
}
// The remainder of this method is based on bufio.Writer.Write from the
// standard library.
for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
var n int
if len(w.ibuf) == 0 {
// Large write, empty buffer.
// Write directly from p to avoid copy.
n, _ = w.write(p)
} else {
n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
w.ibuf = w.ibuf[:len(w.ibuf)+n]
w.Flush()
}
nRet += n
p = p[n:]
}
if w.err != nil {
return nRet, w.err
}
n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
w.ibuf = w.ibuf[:len(w.ibuf)+n]
nRet += n
return nRet, nil
}
func (w *Writer) write(p []byte) (nRet int, errRet error) {
if w.err != nil {
return 0, w.err
}
for len(p) > 0 {
obufStart := len(magicChunk)
if !w.wroteStreamHeader {
w.wroteStreamHeader = true
copy(w.obuf, magicChunk)
obufStart = 0
}
var uncompressed []byte
if len(p) > maxBlockSize {
uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
} else {
uncompressed, p = p, nil
}
checksum := crc(uncompressed)
// Compress the buffer, discarding the result if the improvement
// isn't at least 12.5%.
compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
chunkType := uint8(chunkTypeCompressedData)
chunkLen := 4 + len(compressed)
obufEnd := obufHeaderLen + len(compressed)
if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
chunkType = chunkTypeUncompressedData
chunkLen = 4 + len(uncompressed)
obufEnd = obufHeaderLen
}
// Fill in the per-chunk header that comes before the body.
w.obuf[len(magicChunk)+0] = chunkType
w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
w.err = err
return nRet, err
}
if chunkType == chunkTypeUncompressedData {
if _, err := w.w.Write(uncompressed); err != nil {
w.err = err
return nRet, err
}
}
nRet += len(uncompressed)
}
return nRet, nil
}
// Flush flushes the Writer to its underlying io.Writer.
func (w *Writer) Flush() error {
if w.err != nil {
return w.err
}
if len(w.ibuf) == 0 {
return nil
}
w.write(w.ibuf)
w.ibuf = w.ibuf[:0]
return w.err
}
// Close calls Flush and then closes the Writer.
func (w *Writer) Close() error {
w.Flush()
ret := w.err
if w.err == nil {
w.err = errClosed
}
return ret
}
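
The Writer completes the framing half of the package. A short sketch pairing NewBufferedWriter with the Reader from decode.go (assumed to run against the vendored import path; note that only the buffered Writer needs Close):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/golang/snappy"
)

func main() {
	var stream bytes.Buffer

	// NewBufferedWriter batches small writes into maxBlockSize chunks;
	// Close flushes the final chunk.
	w := snappy.NewBufferedWriter(&stream)
	if _, err := w.Write([]byte("many small writes, ")); err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("one compressed stream")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	out, err := ioutil.ReadAll(snappy.NewReader(&stream))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", out)
}

As a cross-check of the MaxEncodedLen bound above: for maxBlockSize = 65536 it gives 32 + 65536 + 65536/6 = 76490, which is exactly the maxEncodedLenOfMaxBlockSize constant in snappy.go.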

vendor/github.com/golang/snappy/encode_amd64.go generated vendored Normal file
@@ -0,0 +1,29 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
package snappy
// emitLiteral has the same semantics as in encode_other.go.
//
//go:noescape
func emitLiteral(dst, lit []byte) int
// emitCopy has the same semantics as in encode_other.go.
//
//go:noescape
func emitCopy(dst []byte, offset, length int) int
// extendMatch has the same semantics as in encode_other.go.
//
//go:noescape
func extendMatch(src []byte, i, j int) int
// encodeBlock has the same semantics as in encode_other.go.
//
//go:noescape
func encodeBlock(dst, src []byte) (d int)

vendor/github.com/golang/snappy/encode_amd64.s generated vendored Normal file
@@ -0,0 +1,730 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
// https://github.com/golang/snappy/issues/29
//
// As a workaround, the package was built with a known good assembler, and
// those instructions were disassembled by "objdump -d" to yield the
// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
// style comments, in AT&T asm syntax. Note that rsp here is a physical
// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
// fine on Go 1.6.
// The asm code generally follows the pure Go code in encode_other.go, except
// where marked with a "!!!".
// ----------------------------------------------------------------------------
// func emitLiteral(dst, lit []byte) int
//
// All local variables fit into registers. The register allocation:
// - AX len(lit)
// - BX n
// - DX return value
// - DI &dst[i]
// - R10 &lit[0]
//
// The 24 bytes of stack space is to call runtime·memmove.
//
// The unusual register allocation of local variables, such as R10 for the
// source pointer, matches the allocation used at the call site in encodeBlock,
// which makes it easier to manually inline this function.
TEXT ·emitLiteral(SB), NOSPLIT, $24-56
MOVQ dst_base+0(FP), DI
MOVQ lit_base+24(FP), R10
MOVQ lit_len+32(FP), AX
MOVQ AX, DX
MOVL AX, BX
SUBL $1, BX
CMPL BX, $60
JLT oneByte
CMPL BX, $256
JLT twoBytes
threeBytes:
MOVB $0xf4, 0(DI)
MOVW BX, 1(DI)
ADDQ $3, DI
ADDQ $3, DX
JMP memmove
twoBytes:
MOVB $0xf0, 0(DI)
MOVB BX, 1(DI)
ADDQ $2, DI
ADDQ $2, DX
JMP memmove
oneByte:
SHLB $2, BX
MOVB BX, 0(DI)
ADDQ $1, DI
ADDQ $1, DX
memmove:
MOVQ DX, ret+48(FP)
// copy(dst[i:], lit)
//
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
// DI, R10 and AX as arguments.
MOVQ DI, 0(SP)
MOVQ R10, 8(SP)
MOVQ AX, 16(SP)
CALL runtime·memmove(SB)
RET
// ----------------------------------------------------------------------------
// func emitCopy(dst []byte, offset, length int) int
//
// All local variables fit into registers. The register allocation:
// - AX length
// - SI &dst[0]
// - DI &dst[i]
// - R11 offset
//
// The unusual register allocation of local variables, such as R11 for the
// offset, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·emitCopy(SB), NOSPLIT, $0-48
MOVQ dst_base+0(FP), DI
MOVQ DI, SI
MOVQ offset+24(FP), R11
MOVQ length+32(FP), AX
loop0:
// for length >= 68 { etc }
CMPL AX, $68
JLT step1
// Emit a length 64 copy, encoded as 3 bytes.
MOVB $0xfe, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $64, AX
JMP loop0
step1:
// if length > 64 { etc }
CMPL AX, $64
JLE step2
// Emit a length 60 copy, encoded as 3 bytes.
MOVB $0xee, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $60, AX
step2:
// if length >= 12 || offset >= 2048 { goto step3 }
CMPL AX, $12
JGE step3
CMPL R11, $2048
JGE step3
// Emit the remaining copy, encoded as 2 bytes.
MOVB R11, 1(DI)
SHRL $8, R11
SHLB $5, R11
SUBB $4, AX
SHLB $2, AX
ORB AX, R11
ORB $1, R11
MOVB R11, 0(DI)
ADDQ $2, DI
// Return the number of bytes written.
SUBQ SI, DI
MOVQ DI, ret+40(FP)
RET
step3:
// Emit the remaining copy, encoded as 3 bytes.
SUBL $1, AX
SHLB $2, AX
ORB $2, AX
MOVB AX, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
// Return the number of bytes written.
SUBQ SI, DI
MOVQ DI, ret+40(FP)
RET
// ----------------------------------------------------------------------------
// func extendMatch(src []byte, i, j int) int
//
// All local variables fit into registers. The register allocation:
// - DX &src[0]
// - SI &src[j]
// - R13 &src[len(src) - 8]
// - R14 &src[len(src)]
// - R15 &src[i]
//
// The unusual register allocation of local variables, such as R15 for a source
// pointer, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·extendMatch(SB), NOSPLIT, $0-48
MOVQ src_base+0(FP), DX
MOVQ src_len+8(FP), R14
MOVQ i+24(FP), R15
MOVQ j+32(FP), SI
ADDQ DX, R14
ADDQ DX, R15
ADDQ DX, SI
MOVQ R14, R13
SUBQ $8, R13
cmp8:
// As long as we are 8 or more bytes before the end of src, we can load and
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
CMPQ SI, R13
JA cmp1
MOVQ (R15), AX
MOVQ (SI), BX
CMPQ AX, BX
JNE bsf
ADDQ $8, R15
ADDQ $8, SI
JMP cmp8
bsf:
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
// the index of the first byte that differs. The BSF instruction finds the
// least significant 1 bit, the amd64 architecture is little-endian, and
// the shift by 3 converts a bit index to a byte index.
XORQ AX, BX
BSFQ BX, BX
SHRQ $3, BX
ADDQ BX, SI
// Convert from &src[ret] to ret.
SUBQ DX, SI
MOVQ SI, ret+40(FP)
RET
cmp1:
// In src's tail, compare 1 byte at a time.
CMPQ SI, R14
JAE extendMatchEnd
MOVB (R15), AX
MOVB (SI), BX
CMPB AX, BX
JNE extendMatchEnd
ADDQ $1, R15
ADDQ $1, SI
JMP cmp1
extendMatchEnd:
// Convert from &src[ret] to ret.
SUBQ DX, SI
MOVQ SI, ret+40(FP)
RET
// ----------------------------------------------------------------------------
// func encodeBlock(dst, src []byte) (d int)
//
// All local variables fit into registers, other than "var table". The register
// allocation:
// - AX . .
// - BX . .
// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
// - DX 64 &src[0], tableSize
// - SI 72 &src[s]
// - DI 80 &dst[d]
// - R9 88 sLimit
// - R10 . &src[nextEmit]
// - R11 96 prevHash, currHash, nextHash, offset
// - R12 104 &src[base], skip
// - R13 . &src[nextS], &src[len(src) - 8]
// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
// - R15 112 candidate
//
// The second column (56, 64, etc) is the stack offset to spill the registers
// when calling other functions. We could pack this slightly tighter, but it's
// simpler to have a dedicated spill map independent of the function called.
//
// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
TEXT ·encodeBlock(SB), 0, $32888-56
MOVQ dst_base+0(FP), DI
MOVQ src_base+24(FP), SI
MOVQ src_len+32(FP), R14
// shift, tableSize := uint32(32-8), 1<<8
MOVQ $24, CX
MOVQ $256, DX
calcShift:
// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
// shift--
// }
CMPQ DX, $16384
JGE varTable
CMPQ DX, R14
JGE varTable
SUBQ $1, CX
SHLQ $1, DX
JMP calcShift
varTable:
// var table [maxTableSize]uint16
//
// In the asm code, unlike the Go code, we can zero-initialize only the
// first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
// writes 16 bytes, so we can do only tableSize/8 writes instead of the
// 2048 writes that would zero-initialize all of table's 32768 bytes.
SHRQ $3, DX
LEAQ table-32768(SP), BX
PXOR X0, X0
memclr:
MOVOU X0, 0(BX)
ADDQ $16, BX
SUBQ $1, DX
JNZ memclr
// !!! DX = &src[0]
MOVQ SI, DX
// sLimit := len(src) - inputMargin
MOVQ R14, R9
SUBQ $15, R9
// !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
// change for the rest of the function.
MOVQ CX, 56(SP)
MOVQ DX, 64(SP)
MOVQ R9, 88(SP)
// nextEmit := 0
MOVQ DX, R10
// s := 1
ADDQ $1, SI
// nextHash := hash(load32(src, s), shift)
MOVL 0(SI), R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
outer:
// for { etc }
// skip := 32
MOVQ $32, R12
// nextS := s
MOVQ SI, R13
// candidate := 0
MOVQ $0, R15
inner0:
// for { etc }
// s := nextS
MOVQ R13, SI
// bytesBetweenHashLookups := skip >> 5
MOVQ R12, R14
SHRQ $5, R14
// nextS = s + bytesBetweenHashLookups
ADDQ R14, R13
// skip += bytesBetweenHashLookups
ADDQ R14, R12
// if nextS > sLimit { goto emitRemainder }
MOVQ R13, AX
SUBQ DX, AX
CMPQ AX, R9
JA emitRemainder
// candidate = int(table[nextHash])
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
BYTE $0x4e
BYTE $0x0f
BYTE $0xb7
BYTE $0x7c
BYTE $0x5c
BYTE $0x78
// table[nextHash] = uint16(s)
MOVQ SI, AX
SUBQ DX, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
BYTE $0x42
BYTE $0x89
BYTE $0x44
BYTE $0x5c
BYTE $0x78
// nextHash = hash(load32(src, nextS), shift)
MOVL 0(R13), R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// if load32(src, s) != load32(src, candidate) { continue } break
MOVL 0(SI), AX
MOVL (DX)(R15*1), BX
CMPL AX, BX
JNE inner0
fourByteMatch:
// As per the encode_other.go code:
//
// A 4-byte match has been found. We'll later see etc.
// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
// on inputMargin in encode.go.
MOVQ SI, AX
SUBQ R10, AX
CMPQ AX, $16
JLE emitLiteralFastPath
// ----------------------------------------
// Begin inline of the emitLiteral call.
//
// d += emitLiteral(dst[d:], src[nextEmit:s])
MOVL AX, BX
SUBL $1, BX
CMPL BX, $60
JLT inlineEmitLiteralOneByte
CMPL BX, $256
JLT inlineEmitLiteralTwoBytes
inlineEmitLiteralThreeBytes:
MOVB $0xf4, 0(DI)
MOVW BX, 1(DI)
ADDQ $3, DI
JMP inlineEmitLiteralMemmove
inlineEmitLiteralTwoBytes:
MOVB $0xf0, 0(DI)
MOVB BX, 1(DI)
ADDQ $2, DI
JMP inlineEmitLiteralMemmove
inlineEmitLiteralOneByte:
SHLB $2, BX
MOVB BX, 0(DI)
ADDQ $1, DI
inlineEmitLiteralMemmove:
// Spill local variables (registers) onto the stack; call; unspill.
//
// copy(dst[i:], lit)
//
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
// DI, R10 and AX as arguments.
MOVQ DI, 0(SP)
MOVQ R10, 8(SP)
MOVQ AX, 16(SP)
ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
MOVQ SI, 72(SP)
MOVQ DI, 80(SP)
MOVQ R15, 112(SP)
CALL runtime·memmove(SB)
MOVQ 56(SP), CX
MOVQ 64(SP), DX
MOVQ 72(SP), SI
MOVQ 80(SP), DI
MOVQ 88(SP), R9
MOVQ 112(SP), R15
JMP inner1
inlineEmitLiteralEnd:
// End inline of the emitLiteral call.
// ----------------------------------------
emitLiteralFastPath:
// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
MOVB AX, BX
SUBB $1, BX
SHLB $2, BX
MOVB BX, (DI)
ADDQ $1, DI
// !!! Implement the copy from lit to dst as a 16-byte load and store.
// (Encode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only len(lit) bytes, but that's
// OK. Subsequent iterations will fix up the overrun.
//
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
MOVOU 0(R10), X0
MOVOU X0, 0(DI)
ADDQ AX, DI
inner1:
// for { etc }
// base := s
MOVQ SI, R12
// !!! offset := base - candidate
MOVQ R12, R11
SUBQ R15, R11
SUBQ DX, R11
// ----------------------------------------
// Begin inline of the extendMatch call.
//
// s = extendMatch(src, candidate+4, s+4)
// !!! R14 = &src[len(src)]
MOVQ src_len+32(FP), R14
ADDQ DX, R14
// !!! R13 = &src[len(src) - 8]
MOVQ R14, R13
SUBQ $8, R13
// !!! R15 = &src[candidate + 4]
ADDQ $4, R15
ADDQ DX, R15
// !!! s += 4
ADDQ $4, SI
inlineExtendMatchCmp8:
// As long as we are 8 or more bytes before the end of src, we can load and
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
CMPQ SI, R13
JA inlineExtendMatchCmp1
MOVQ (R15), AX
MOVQ (SI), BX
CMPQ AX, BX
JNE inlineExtendMatchBSF
ADDQ $8, R15
ADDQ $8, SI
JMP inlineExtendMatchCmp8
inlineExtendMatchBSF:
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
// the index of the first byte that differs. The BSF instruction finds the
// least significant 1 bit, the amd64 architecture is little-endian, and
// the shift by 3 converts a bit index to a byte index.
XORQ AX, BX
BSFQ BX, BX
SHRQ $3, BX
ADDQ BX, SI
JMP inlineExtendMatchEnd
inlineExtendMatchCmp1:
// In src's tail, compare 1 byte at a time.
CMPQ SI, R14
JAE inlineExtendMatchEnd
MOVB (R15), AX
MOVB (SI), BX
CMPB AX, BX
JNE inlineExtendMatchEnd
ADDQ $1, R15
ADDQ $1, SI
JMP inlineExtendMatchCmp1
inlineExtendMatchEnd:
// End inline of the extendMatch call.
// ----------------------------------------
// ----------------------------------------
// Begin inline of the emitCopy call.
//
// d += emitCopy(dst[d:], base-candidate, s-base)
// !!! length := s - base
MOVQ SI, AX
SUBQ R12, AX
inlineEmitCopyLoop0:
// for length >= 68 { etc }
CMPL AX, $68
JLT inlineEmitCopyStep1
// Emit a length 64 copy, encoded as 3 bytes.
MOVB $0xfe, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $64, AX
JMP inlineEmitCopyLoop0
inlineEmitCopyStep1:
// if length > 64 { etc }
CMPL AX, $64
JLE inlineEmitCopyStep2
// Emit a length 60 copy, encoded as 3 bytes.
MOVB $0xee, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $60, AX
inlineEmitCopyStep2:
// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
CMPL AX, $12
JGE inlineEmitCopyStep3
CMPL R11, $2048
JGE inlineEmitCopyStep3
// Emit the remaining copy, encoded as 2 bytes.
MOVB R11, 1(DI)
SHRL $8, R11
SHLB $5, R11
SUBB $4, AX
SHLB $2, AX
ORB AX, R11
ORB $1, R11
MOVB R11, 0(DI)
ADDQ $2, DI
JMP inlineEmitCopyEnd
inlineEmitCopyStep3:
// Emit the remaining copy, encoded as 3 bytes.
SUBL $1, AX
SHLB $2, AX
ORB $2, AX
MOVB AX, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
inlineEmitCopyEnd:
// End inline of the emitCopy call.
// ----------------------------------------
// nextEmit = s
MOVQ SI, R10
// if s >= sLimit { goto emitRemainder }
MOVQ SI, AX
SUBQ DX, AX
CMPQ AX, R9
JAE emitRemainder
// As per the encode_other.go code:
//
// We could immediately etc.
// x := load64(src, s-1)
MOVQ -1(SI), R14
// prevHash := hash(uint32(x>>0), shift)
MOVL R14, R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// table[prevHash] = uint16(s-1)
MOVQ SI, AX
SUBQ DX, AX
SUBQ $1, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
BYTE $0x42
BYTE $0x89
BYTE $0x44
BYTE $0x5c
BYTE $0x78
// currHash := hash(uint32(x>>8), shift)
SHRQ $8, R14
MOVL R14, R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// candidate = int(table[currHash])
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
BYTE $0x4e
BYTE $0x0f
BYTE $0xb7
BYTE $0x7c
BYTE $0x5c
BYTE $0x78
// table[currHash] = uint16(s)
ADDQ $1, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
BYTE $0x42
BYTE $0x89
BYTE $0x44
BYTE $0x5c
BYTE $0x78
// if uint32(x>>8) == load32(src, candidate) { continue }
MOVL (DX)(R15*1), BX
CMPL R14, BX
JEQ inner1
// nextHash = hash(uint32(x>>16), shift)
SHRQ $8, R14
MOVL R14, R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// s++
ADDQ $1, SI
// break out of the inner1 for loop, i.e. continue the outer loop.
JMP outer
emitRemainder:
// if nextEmit < len(src) { etc }
MOVQ src_len+32(FP), AX
ADDQ DX, AX
CMPQ R10, AX
JEQ encodeBlockEnd
// d += emitLiteral(dst[d:], src[nextEmit:])
//
// Push args.
MOVQ DI, 0(SP)
MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
MOVQ R10, 24(SP)
SUBQ R10, AX
MOVQ AX, 32(SP)
MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
// Spill local variables (registers) onto the stack; call; unspill.
MOVQ DI, 80(SP)
CALL ·emitLiteral(SB)
MOVQ 80(SP), DI
// Finish the "d +=" part of "d += emitLiteral(etc)".
ADDQ 48(SP), DI
encodeBlockEnd:
MOVQ dst_base+0(FP), AX
SUBQ AX, DI
MOVQ DI, d+48(FP)
RET
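
The calcShift preamble mirrors the shift/tableSize loop that encodeBlock's Go original (encode_other.go, below) runs before hashing: the table grows with the input up to 1<<14 entries, and the shift shrinks in step so that hash(u, shift) = (u * 0x1e35a7bd) >> shift always lands inside the table. A small illustrative sketch of how the two values fall out of the input length (the sample lengths are arbitrary):

package main

import "fmt"

// tableParams reproduces the shift/tableSize computation for illustration.
func tableParams(srcLen int) (shift uint32, tableSize int) {
	shift, tableSize = 32-8, 1<<8
	for ; tableSize < 1<<14 && tableSize < srcLen; tableSize *= 2 {
		shift--
	}
	return shift, tableSize
}

func main() {
	for _, n := range []int{100, 1000, 70000} {
		s, t := tableParams(n)
		fmt.Printf("len(src)=%5d -> shift=%d tableSize=%d\n", n, s, t)
	}
	// len(src)=  100 -> shift=24 tableSize=256
	// len(src)= 1000 -> shift=22 tableSize=1024
	// len(src)=70000 -> shift=18 tableSize=16384
}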

vendor/github.com/golang/snappy/encode_other.go generated vendored Normal file
@@ -0,0 +1,238 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 appengine !gc noasm
package snappy
func load32(b []byte, i int) uint32 {
b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
func load64(b []byte, i int) uint64 {
b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}
// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
// dst is long enough to hold the encoded bytes
// 1 <= len(lit) && len(lit) <= 65536
func emitLiteral(dst, lit []byte) int {
i, n := 0, uint(len(lit)-1)
switch {
case n < 60:
dst[0] = uint8(n)<<2 | tagLiteral
i = 1
case n < 1<<8:
dst[0] = 60<<2 | tagLiteral
dst[1] = uint8(n)
i = 2
default:
dst[0] = 61<<2 | tagLiteral
dst[1] = uint8(n)
dst[2] = uint8(n >> 8)
i = 3
}
return i + copy(dst[i:], lit)
}
// emitCopy writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
// dst is long enough to hold the encoded bytes
// 1 <= offset && offset <= 65535
// 4 <= length && length <= 65535
func emitCopy(dst []byte, offset, length int) int {
i := 0
// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
// threshold for this loop is a little higher (at 68 = 64 + 4), and the
// length emitted down below is a little lower (at 60 = 64 - 4), because
// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
for length >= 68 {
// Emit a length 64 copy, encoded as 3 bytes.
dst[i+0] = 63<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
i += 3
length -= 64
}
if length > 64 {
// Emit a length 60 copy, encoded as 3 bytes.
dst[i+0] = 59<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
i += 3
length -= 60
}
if length >= 12 || offset >= 2048 {
// Emit the remaining copy, encoded as 3 bytes.
dst[i+0] = uint8(length-1)<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
return i + 3
}
// Emit the remaining copy, encoded as 2 bytes.
dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
dst[i+1] = uint8(offset)
return i + 2
}
// extendMatch returns the largest k such that k <= len(src) and that
// src[i:i+k-j] and src[j:k] have the same contents.
//
// It assumes that:
// 0 <= i && i < j && j <= len(src)
func extendMatch(src []byte, i, j int) int {
for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
}
return j
}
func hash(u, shift uint32) uint32 {
return (u * 0x1e35a7bd) >> shift
}
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
// len(dst) >= MaxEncodedLen(len(src)) &&
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlock(dst, src []byte) (d int) {
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
// The table element type is uint16, as s < sLimit and sLimit < len(src)
// and len(src) <= maxBlockSize and maxBlockSize == 65536.
const (
maxTableSize = 1 << 14
// tableMask is redundant, but helps the compiler eliminate bounds
// checks.
tableMask = maxTableSize - 1
)
shift := uint32(32 - 8)
for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
shift--
}
// In Go, all array elements are zero-initialized, so there is no advantage
// to a smaller tableSize per se. However, it matches the C++ algorithm,
// and in the asm versions of this code, we can get away with zeroing only
// the first tableSize elements.
var table [maxTableSize]uint16
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := len(src) - inputMargin
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := 0
// The encoded form must start with a literal, as there are no previous
// bytes to copy, so we start looking for hash matches at s == 1.
s := 1
nextHash := hash(load32(src, s), shift)
for {
// Copied from the C++ snappy implementation:
//
// Heuristic match skipping: If 32 bytes are scanned with no matches
// found, start looking only at every other byte. If 32 more bytes are
// scanned (or skipped), look at every third byte, etc.. When a match
// is found, immediately go back to looking at every byte. This is a
// small loss (~5% performance, ~0.1% density) for compressible data
// due to more bookkeeping, but for non-compressible data (such as
// JPEG) it's a huge win since the compressor quickly "realizes" the
// data is incompressible and doesn't bother looking for matches
// everywhere.
//
// The "skip" variable keeps track of how many bytes there are since
// the last match; dividing it by 32 (ie. right-shifting by five) gives
// the number of bytes to move ahead for each iteration.
skip := 32
nextS := s
candidate := 0
for {
s = nextS
bytesBetweenHashLookups := skip >> 5
nextS = s + bytesBetweenHashLookups
skip += bytesBetweenHashLookups
if nextS > sLimit {
goto emitRemainder
}
candidate = int(table[nextHash&tableMask])
table[nextHash&tableMask] = uint16(s)
nextHash = hash(load32(src, nextS), shift)
if load32(src, s) == load32(src, candidate) {
break
}
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
d += emitLiteral(dst[d:], src[nextEmit:s])
// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
base := s
// Extend the 4-byte match as long as possible.
//
// This is an inlined version of:
// s = extendMatch(src, candidate+4, s+4)
s += 4
for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
}
d += emitCopy(dst[d:], base-candidate, s-base)
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-1 and at s. If
// another emitCopy is not our next move, also calculate nextHash
// at s+1. At least on GOARCH=amd64, these three hash calculations
// are faster as one load64 call (with some shifts) instead of
// three load32 calls.
x := load64(src, s-1)
prevHash := hash(uint32(x>>0), shift)
table[prevHash&tableMask] = uint16(s - 1)
currHash := hash(uint32(x>>8), shift)
candidate = int(table[currHash&tableMask])
table[currHash&tableMask] = uint16(s)
if uint32(x>>8) != load32(src, candidate) {
nextHash = hash(uint32(x>>16), shift)
s++
break
}
}
}
emitRemainder:
if nextEmit < len(src) {
d += emitLiteral(dst[d:], src[nextEmit:])
}
return d
}
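
The tag arithmetic in emitCopy is easiest to see with concrete numbers. A test-style check that could sit next to this file (hypothetical, not part of this commit), working through the 2-byte tagCopy1 branch for offset 10, length 5:

package snappy

import "testing"

func TestEmitCopySmall(t *testing.T) {
	dst := make([]byte, 8)
	// length 5 < 12 and offset 10 < 2048, so the 2-byte branch applies:
	//   dst[0] = uint8(10>>8)<<5 | uint8(5-4)<<2 | tagCopy1 = 0 | 4 | 1 = 0x05
	//   dst[1] = uint8(10) = 0x0a
	if n := emitCopy(dst, 10, 5); n != 2 || dst[0] != 0x05 || dst[1] != 0x0a {
		t.Fatalf("got % x (n=%d)", dst[:n], n)
	}
}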

vendor/github.com/golang/snappy/snappy.go generated vendored Normal file
@@ -0,0 +1,98 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package snappy implements the Snappy compression format. It aims for very
// high speeds and reasonable compression.
//
// There are actually two Snappy formats: block and stream. They are related,
// but different: trying to decompress block-compressed data as a Snappy stream
// will fail, and vice versa. The block format is the Decode and Encode
// functions and the stream format is the Reader and Writer types.
//
// The block format, the more common case, is used when the complete size (the
// number of bytes) of the original data is known upfront, at the time
// compression starts. The stream format, also known as the framing format, is
// for when that isn't always true.
//
// The canonical, C++ implementation is at https://github.com/google/snappy and
// it only implements the block format.
package snappy // import "github.com/golang/snappy"
import (
"hash/crc32"
)
/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
first byte of each chunk is broken into its 2 least and 6 most significant bits
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
Zero means a literal tag. All other values mean a copy tag.
For literal tags:
- If m < 60, the next 1 + m bytes are literal bytes.
- Otherwise, let n be the little-endian unsigned integer denoted by the next
m - 59 bytes. The next 1 + n bytes after that are literal bytes.
For copy tags, length bytes are copied from offset bytes ago, in the style of
Lempel-Ziv compression algorithms. In particular:
- For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
of the offset. The next byte is bits 0-7 of the offset.
- For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
The length is 1 + m. The offset is the little-endian unsigned integer
denoted by the next 2 bytes.
- For l == 3, this tag is a legacy format that is no longer issued by most
encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
[1, 65). The length is 1 + m. The offset is the little-endian unsigned
integer denoted by the next 4 bytes.
*/
const (
tagLiteral = 0x00
tagCopy1 = 0x01
tagCopy2 = 0x02
tagCopy4 = 0x03
)
const (
checksumSize = 4
chunkHeaderSize = 4
magicChunk = "\xff\x06\x00\x00" + magicBody
magicBody = "sNaPpY"
// maxBlockSize is the maximum size of the input to encodeBlock. It is not
// part of the wire format per se, but some parts of the encoder assume
// that an offset fits into a uint16.
//
// Also, for the framing format (Writer type instead of Encode function),
// https://github.com/google/snappy/blob/master/framing_format.txt says
// that "the uncompressed data in a chunk must be no longer than 65536
// bytes".
maxBlockSize = 65536
// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
// hard coded to be a const instead of a variable, so that obufLen can also
// be a const. Their equivalence is confirmed by
// TestMaxEncodedLenOfMaxBlockSize.
maxEncodedLenOfMaxBlockSize = 76490
obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize
)
const (
chunkTypeCompressedData = 0x00
chunkTypeUncompressedData = 0x01
chunkTypePadding = 0xfe
chunkTypeStreamIdentifier = 0xff
)
var crcTable = crc32.MakeTable(crc32.Castagnoli)
// crc implements the checksum specified in section 3 of
// https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 {
c := crc32.Update(0, crcTable, b)
return uint32(c>>15|c<<17) + 0xa282ead8
}
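
The format comment above pins down the tag-byte layout. A tiny sketch of splitting a chunk's first byte into l and m per that description (tagOf is a hypothetical helper, not part of the package):

package main

import "fmt"

// tagOf returns l (the low 2 bits, selecting literal vs. copy) and m (the
// high 6 bits) of a block-format tag byte.
func tagOf(b byte) (l, m uint8) {
	return b & 0x03, b >> 2
}

func main() {
	l, m := tagOf(0x05) // the 2-byte copy tag from the emitCopy example above
	// l == 1 (tagCopy1): length = 4 + (m & 7) = 5; offset bits 8-10 = m >> 3.
	fmt.Println(l, m) // 1 1
}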

@@ -1,3 +0,0 @@
documents
coverage.txt
_book

@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2013-NOW Jinzhu <wosmvp@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -1,41 +0,0 @@
# GORM
The fantastic ORM library for Golang, aims to be developer friendly.
[![go report card](https://goreportcard.com/badge/github.com/jinzhu/gorm "go report card")](https://goreportcard.com/report/github.com/jinzhu/gorm)
[![wercker status](https://app.wercker.com/status/8596cace912c9947dd9c8542ecc8cb8b/s/master "wercker status")](https://app.wercker.com/project/byKey/8596cace912c9947dd9c8542ecc8cb8b)
[![codecov](https://codecov.io/gh/jinzhu/gorm/branch/master/graph/badge.svg)](https://codecov.io/gh/jinzhu/gorm)
[![Join the chat at https://gitter.im/jinzhu/gorm](https://img.shields.io/gitter/room/jinzhu/gorm.svg)](https://gitter.im/jinzhu/gorm?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Open Collective Backer](https://opencollective.com/gorm/tiers/backer/badge.svg?label=backer&color=brightgreen "Open Collective Backer")](https://opencollective.com/gorm)
[![Open Collective Sponsor](https://opencollective.com/gorm/tiers/sponsor/badge.svg?label=sponsor&color=brightgreen "Open Collective Sponsor")](https://opencollective.com/gorm)
[![MIT license](https://img.shields.io/badge/license-MIT-brightgreen.svg)](https://opensource.org/licenses/MIT)
[![GoDoc](https://godoc.org/github.com/jinzhu/gorm?status.svg)](https://godoc.org/github.com/jinzhu/gorm)
## Overview
* Full-Featured ORM (almost)
* Associations (Has One, Has Many, Belongs To, Many To Many, Polymorphism)
* Hooks (Before/After Create/Save/Update/Delete/Find)
* Preloading (eager loading)
* Transactions
* Composite Primary Key
* SQL Builder
* Auto Migrations
* Logger
* Extendable, write Plugins based on GORM callbacks
* Every feature comes with tests
* Developer Friendly
## Getting Started
* GORM Guides [https://gorm.io](https://gorm.io)
## Contributing
[You can help to deliver a better GORM, check out things you can do](https://gorm.io/contribute.html)
## License
© Jinzhu, 2013~time.Now
Released under the [MIT License](https://github.com/jinzhu/gorm/blob/master/License)

View File

@ -1,377 +0,0 @@
package gorm
import (
"errors"
"fmt"
"reflect"
)
// Association Mode contains helper methods to handle relationships easily.
type Association struct {
Error error
scope *Scope
column string
field *Field
}
// Find finds all related associations
func (association *Association) Find(value interface{}) *Association {
association.scope.related(value, association.column)
return association.setErr(association.scope.db.Error)
}
// Append appends new associations for many2many and has_many relationships, and replaces the current association for has_one and belongs_to
func (association *Association) Append(values ...interface{}) *Association {
if association.Error != nil {
return association
}
if relationship := association.field.Relationship; relationship.Kind == "has_one" {
return association.Replace(values...)
}
return association.saveAssociations(values...)
}
// Replace replaces current associations with the new ones
func (association *Association) Replace(values ...interface{}) *Association {
if association.Error != nil {
return association
}
var (
relationship = association.field.Relationship
scope = association.scope
field = association.field.Field
newDB = scope.NewDB()
)
// Append new values
association.field.Set(reflect.Zero(association.field.Field.Type()))
association.saveAssociations(values...)
// Belongs To
if relationship.Kind == "belongs_to" {
// Set foreign key to be null when clearing value (length equals 0)
if len(values) == 0 {
// Set foreign key to be nil
var foreignKeyMap = map[string]interface{}{}
for _, foreignKey := range relationship.ForeignDBNames {
foreignKeyMap[foreignKey] = nil
}
association.setErr(newDB.Model(scope.Value).UpdateColumn(foreignKeyMap).Error)
}
} else {
// Polymorphic Relations
if relationship.PolymorphicDBName != "" {
newDB = newDB.Where(fmt.Sprintf("%v = ?", scope.Quote(relationship.PolymorphicDBName)), relationship.PolymorphicValue)
}
// Delete relations except newly created ones
if len(values) > 0 {
var associationForeignFieldNames, associationForeignDBNames []string
if relationship.Kind == "many_to_many" {
// for many to many relations, get association field names from association foreign keys
associationScope := scope.New(reflect.New(field.Type()).Interface())
for idx, dbName := range relationship.AssociationForeignFieldNames {
if field, ok := associationScope.FieldByName(dbName); ok {
associationForeignFieldNames = append(associationForeignFieldNames, field.Name)
associationForeignDBNames = append(associationForeignDBNames, relationship.AssociationForeignDBNames[idx])
}
}
} else {
// If has one/many relations, use primary keys
for _, field := range scope.New(reflect.New(field.Type()).Interface()).PrimaryFields() {
associationForeignFieldNames = append(associationForeignFieldNames, field.Name)
associationForeignDBNames = append(associationForeignDBNames, field.DBName)
}
}
newPrimaryKeys := scope.getColumnAsArray(associationForeignFieldNames, field.Interface())
if len(newPrimaryKeys) > 0 {
sql := fmt.Sprintf("%v NOT IN (%v)", toQueryCondition(scope, associationForeignDBNames), toQueryMarks(newPrimaryKeys))
newDB = newDB.Where(sql, toQueryValues(newPrimaryKeys)...)
}
}
if relationship.Kind == "many_to_many" {
// if many to many relations, delete related relations from join table
var sourceForeignFieldNames []string
for _, dbName := range relationship.ForeignFieldNames {
if field, ok := scope.FieldByName(dbName); ok {
sourceForeignFieldNames = append(sourceForeignFieldNames, field.Name)
}
}
if sourcePrimaryKeys := scope.getColumnAsArray(sourceForeignFieldNames, scope.Value); len(sourcePrimaryKeys) > 0 {
newDB = newDB.Where(fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(sourcePrimaryKeys)), toQueryValues(sourcePrimaryKeys)...)
association.setErr(relationship.JoinTableHandler.Delete(relationship.JoinTableHandler, newDB))
}
} else if relationship.Kind == "has_one" || relationship.Kind == "has_many" {
// has_one or has_many relations, set foreign key to be nil (TODO or delete them?)
var foreignKeyMap = map[string]interface{}{}
for idx, foreignKey := range relationship.ForeignDBNames {
foreignKeyMap[foreignKey] = nil
if field, ok := scope.FieldByName(relationship.AssociationForeignFieldNames[idx]); ok {
newDB = newDB.Where(fmt.Sprintf("%v = ?", scope.Quote(foreignKey)), field.Field.Interface())
}
}
fieldValue := reflect.New(association.field.Field.Type()).Interface()
association.setErr(newDB.Model(fieldValue).UpdateColumn(foreignKeyMap).Error)
}
}
return association
}
// Delete removes the relationship between the source and the passed arguments, but won't delete those arguments
func (association *Association) Delete(values ...interface{}) *Association {
if association.Error != nil {
return association
}
var (
relationship = association.field.Relationship
scope = association.scope
field = association.field.Field
newDB = scope.NewDB()
)
if len(values) == 0 {
return association
}
var deletingResourcePrimaryFieldNames, deletingResourcePrimaryDBNames []string
for _, field := range scope.New(reflect.New(field.Type()).Interface()).PrimaryFields() {
deletingResourcePrimaryFieldNames = append(deletingResourcePrimaryFieldNames, field.Name)
deletingResourcePrimaryDBNames = append(deletingResourcePrimaryDBNames, field.DBName)
}
deletingPrimaryKeys := scope.getColumnAsArray(deletingResourcePrimaryFieldNames, values...)
if relationship.Kind == "many_to_many" {
// source value's foreign keys
for idx, foreignKey := range relationship.ForeignDBNames {
if field, ok := scope.FieldByName(relationship.ForeignFieldNames[idx]); ok {
newDB = newDB.Where(fmt.Sprintf("%v = ?", scope.Quote(foreignKey)), field.Field.Interface())
}
}
// get association's foreign field names
var associationScope = scope.New(reflect.New(field.Type()).Interface())
var associationForeignFieldNames []string
for _, associationDBName := range relationship.AssociationForeignFieldNames {
if field, ok := associationScope.FieldByName(associationDBName); ok {
associationForeignFieldNames = append(associationForeignFieldNames, field.Name)
}
}
// association value's foreign keys
deletingPrimaryKeys := scope.getColumnAsArray(associationForeignFieldNames, values...)
sql := fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.AssociationForeignDBNames), toQueryMarks(deletingPrimaryKeys))
newDB = newDB.Where(sql, toQueryValues(deletingPrimaryKeys)...)
association.setErr(relationship.JoinTableHandler.Delete(relationship.JoinTableHandler, newDB))
} else {
var foreignKeyMap = map[string]interface{}{}
for _, foreignKey := range relationship.ForeignDBNames {
foreignKeyMap[foreignKey] = nil
}
if relationship.Kind == "belongs_to" {
// find with deleting relation's foreign keys
primaryKeys := scope.getColumnAsArray(relationship.AssociationForeignFieldNames, values...)
newDB = newDB.Where(
fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(primaryKeys)),
toQueryValues(primaryKeys)...,
)
// set foreign key to be null if there are some records affected
modelValue := reflect.New(scope.GetModelStruct().ModelType).Interface()
if results := newDB.Model(modelValue).UpdateColumn(foreignKeyMap); results.Error == nil {
if results.RowsAffected > 0 {
scope.updatedAttrsWithValues(foreignKeyMap)
}
} else {
association.setErr(results.Error)
}
} else if relationship.Kind == "has_one" || relationship.Kind == "has_many" {
// find all relations
primaryKeys := scope.getColumnAsArray(relationship.AssociationForeignFieldNames, scope.Value)
newDB = newDB.Where(
fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(primaryKeys)),
toQueryValues(primaryKeys)...,
)
// only include those deleting relations
newDB = newDB.Where(
fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, deletingResourcePrimaryDBNames), toQueryMarks(deletingPrimaryKeys)),
toQueryValues(deletingPrimaryKeys)...,
)
// set matched relation's foreign key to be null
fieldValue := reflect.New(association.field.Field.Type()).Interface()
association.setErr(newDB.Model(fieldValue).UpdateColumn(foreignKeyMap).Error)
}
}
// Remove deleted records from source's field
if association.Error == nil {
if field.Kind() == reflect.Slice {
leftValues := reflect.Zero(field.Type())
for i := 0; i < field.Len(); i++ {
reflectValue := field.Index(i)
primaryKey := scope.getColumnAsArray(deletingResourcePrimaryFieldNames, reflectValue.Interface())[0]
var isDeleted = false
for _, pk := range deletingPrimaryKeys {
if equalAsString(primaryKey, pk) {
isDeleted = true
break
}
}
if !isDeleted {
leftValues = reflect.Append(leftValues, reflectValue)
}
}
association.field.Set(leftValues)
} else if field.Kind() == reflect.Struct {
primaryKey := scope.getColumnAsArray(deletingResourcePrimaryFieldNames, field.Interface())[0]
for _, pk := range deletingPrimaryKeys {
if equalAsString(primaryKey, pk) {
association.field.Set(reflect.Zero(field.Type()))
break
}
}
}
}
return association
}
// Clear removes the relationship between the source and the current associations; it won't delete those associations
func (association *Association) Clear() *Association {
return association.Replace()
}
// Count returns the count of current associations
func (association *Association) Count() int {
var (
count = 0
relationship = association.field.Relationship
scope = association.scope
fieldValue = association.field.Field.Interface()
query = scope.DB()
)
switch relationship.Kind {
case "many_to_many":
query = relationship.JoinTableHandler.JoinWith(relationship.JoinTableHandler, query, scope.Value)
case "has_many", "has_one":
primaryKeys := scope.getColumnAsArray(relationship.AssociationForeignFieldNames, scope.Value)
query = query.Where(
fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(primaryKeys)),
toQueryValues(primaryKeys)...,
)
case "belongs_to":
primaryKeys := scope.getColumnAsArray(relationship.ForeignFieldNames, scope.Value)
query = query.Where(
fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.AssociationForeignDBNames), toQueryMarks(primaryKeys)),
toQueryValues(primaryKeys)...,
)
}
if relationship.PolymorphicType != "" {
query = query.Where(
fmt.Sprintf("%v.%v = ?", scope.New(fieldValue).QuotedTableName(), scope.Quote(relationship.PolymorphicDBName)),
relationship.PolymorphicValue,
)
}
if err := query.Model(fieldValue).Count(&count).Error; err != nil {
association.Error = err
}
return count
}
// saveAssociations saves the passed values as associations
func (association *Association) saveAssociations(values ...interface{}) *Association {
var (
scope = association.scope
field = association.field
relationship = field.Relationship
)
saveAssociation := func(reflectValue reflect.Value) {
// value has to be a pointer
if reflectValue.Kind() != reflect.Ptr {
reflectPtr := reflect.New(reflectValue.Type())
reflectPtr.Elem().Set(reflectValue)
reflectValue = reflectPtr
}
// value has to be saved for many2many
if relationship.Kind == "many_to_many" {
if scope.New(reflectValue.Interface()).PrimaryKeyZero() {
association.setErr(scope.NewDB().Save(reflectValue.Interface()).Error)
}
}
// Assign Fields
var fieldType = field.Field.Type()
var setFieldBackToValue, setSliceFieldBackToValue bool
if reflectValue.Type().AssignableTo(fieldType) {
field.Set(reflectValue)
} else if reflectValue.Type().Elem().AssignableTo(fieldType) {
// if the field's type is a struct, the value needs to be set back to the argument after save
setFieldBackToValue = true
field.Set(reflectValue.Elem())
} else if fieldType.Kind() == reflect.Slice {
if reflectValue.Type().AssignableTo(fieldType.Elem()) {
field.Set(reflect.Append(field.Field, reflectValue))
} else if reflectValue.Type().Elem().AssignableTo(fieldType.Elem()) {
// if the field's type is a slice of structs, the value needs to be set back to the argument after save
setSliceFieldBackToValue = true
field.Set(reflect.Append(field.Field, reflectValue.Elem()))
}
}
if relationship.Kind == "many_to_many" {
association.setErr(relationship.JoinTableHandler.Add(relationship.JoinTableHandler, scope.NewDB(), scope.Value, reflectValue.Interface()))
} else {
association.setErr(scope.NewDB().Select(field.Name).Save(scope.Value).Error)
if setFieldBackToValue {
reflectValue.Elem().Set(field.Field)
} else if setSliceFieldBackToValue {
reflectValue.Elem().Set(field.Field.Index(field.Field.Len() - 1))
}
}
}
for _, value := range values {
reflectValue := reflect.ValueOf(value)
indirectReflectValue := reflect.Indirect(reflectValue)
if indirectReflectValue.Kind() == reflect.Struct {
saveAssociation(reflectValue)
} else if indirectReflectValue.Kind() == reflect.Slice {
for i := 0; i < indirectReflectValue.Len(); i++ {
saveAssociation(indirectReflectValue.Index(i))
}
} else {
association.setErr(errors.New("invalid value type"))
}
}
return association
}
// setErr sets the error when it is not nil, and returns the Association.
func (association *Association) setErr(err error) *Association {
if err != nil {
association.Error = err
}
return association
}

View File

@ -1,250 +0,0 @@
package gorm
import "fmt"
// DefaultCallback holds the default callbacks defined by gorm
var DefaultCallback = &Callback{logger: nopLogger{}}
// Callback is a struct that contains all CRUD callbacks
// Field `creates` contains callbacks that will be called when creating an object
// Field `updates` contains callbacks that will be called when updating an object
// Field `deletes` contains callbacks that will be called when deleting an object
// Field `queries` contains callbacks that will be called when querying an object with query methods like Find, First, Related, Association...
// Field `rowQueries` contains callbacks that will be called when querying an object with Row, Rows...
// Field `processors` contains all callback processors, used to generate the above callbacks in order
type Callback struct {
logger logger
creates []*func(scope *Scope)
updates []*func(scope *Scope)
deletes []*func(scope *Scope)
queries []*func(scope *Scope)
rowQueries []*func(scope *Scope)
processors []*CallbackProcessor
}
// CallbackProcessor contains callback information
type CallbackProcessor struct {
logger logger
name string // current callback's name
before string // register current callback before a callback
after string // register current callback after a callback
replace bool // replace callbacks with same name
remove bool // delete callbacks with same name
kind string // callback type: create, update, delete, query, row_query
processor *func(scope *Scope) // callback handler
parent *Callback
}
func (c *Callback) clone(logger logger) *Callback {
return &Callback{
logger: logger,
creates: c.creates,
updates: c.updates,
deletes: c.deletes,
queries: c.queries,
rowQueries: c.rowQueries,
processors: c.processors,
}
}
// Create could be used to register callbacks for creating an object
//
//	db.Callback().Create().After("gorm:create").Register("plugin:run_after_create", func(scope *Scope) {
//		// business logic
//		...
//
//		// set an error if something went wrong; this will roll back the create
//		scope.Err(errors.New("error"))
//	})
func (c *Callback) Create() *CallbackProcessor {
return &CallbackProcessor{logger: c.logger, kind: "create", parent: c}
}
// Update could be used to register callbacks for updating an object; refer to `Create` for usage
func (c *Callback) Update() *CallbackProcessor {
return &CallbackProcessor{logger: c.logger, kind: "update", parent: c}
}
// Delete could be used to register callbacks for deleting an object; refer to `Create` for usage
func (c *Callback) Delete() *CallbackProcessor {
return &CallbackProcessor{logger: c.logger, kind: "delete", parent: c}
}
// Query could be used to register callbacks for querying objects with query methods like `Find`, `First`, `Related`, `Association`...
// Refer to `Create` for usage
func (c *Callback) Query() *CallbackProcessor {
return &CallbackProcessor{logger: c.logger, kind: "query", parent: c}
}
// RowQuery could be used to register callbacks for querying objects with `Row` and `Rows`; refer to `Create` for usage
func (c *Callback) RowQuery() *CallbackProcessor {
return &CallbackProcessor{logger: c.logger, kind: "row_query", parent: c}
}
// After inserts a new callback after callback `callbackName`; refer to `Callbacks.Create`
func (cp *CallbackProcessor) After(callbackName string) *CallbackProcessor {
cp.after = callbackName
return cp
}
// Before inserts a new callback before callback `callbackName`; refer to `Callbacks.Create`
func (cp *CallbackProcessor) Before(callbackName string) *CallbackProcessor {
cp.before = callbackName
return cp
}
// Register registers a new callback; refer to `Callbacks.Create`
func (cp *CallbackProcessor) Register(callbackName string, callback func(scope *Scope)) {
if cp.kind == "row_query" {
if cp.before == "" && cp.after == "" && callbackName != "gorm:row_query" {
cp.logger.Print("info", fmt.Sprintf("Registering RowQuery callback %v without specify order with Before(), After(), applying Before('gorm:row_query') by default for compatibility...", callbackName))
cp.before = "gorm:row_query"
}
}
cp.logger.Print("info", fmt.Sprintf("[info] registering callback `%v` from %v", callbackName, fileWithLineNum()))
cp.name = callbackName
cp.processor = &callback
cp.parent.processors = append(cp.parent.processors, cp)
cp.parent.reorder()
}
// Remove removes a registered callback
// db.Callback().Create().Remove("gorm:update_time_stamp_when_create")
func (cp *CallbackProcessor) Remove(callbackName string) {
cp.logger.Print("info", fmt.Sprintf("[info] removing callback `%v` from %v", callbackName, fileWithLineNum()))
cp.name = callbackName
cp.remove = true
cp.parent.processors = append(cp.parent.processors, cp)
cp.parent.reorder()
}
// Replace replaces a registered callback with a new callback
//
//	db.Callback().Create().Replace("gorm:update_time_stamp_when_create", func(scope *Scope) {
//		scope.SetColumn("CreatedAt", now)
//		scope.SetColumn("UpdatedAt", now)
//	})
func (cp *CallbackProcessor) Replace(callbackName string, callback func(scope *Scope)) {
cp.logger.Print("info", fmt.Sprintf("[info] replacing callback `%v` from %v", callbackName, fileWithLineNum()))
cp.name = callbackName
cp.processor = &callback
cp.replace = true
cp.parent.processors = append(cp.parent.processors, cp)
cp.parent.reorder()
}
// Get gets a registered callback
// db.Callback().Create().Get("gorm:create")
func (cp *CallbackProcessor) Get(callbackName string) (callback func(scope *Scope)) {
for _, p := range cp.parent.processors {
if p.name == callbackName && p.kind == cp.kind {
if p.remove {
callback = nil
} else {
callback = *p.processor
}
}
}
return
}
// getRIndex gets the rightmost index of str in a string slice
func getRIndex(strs []string, str string) int {
for i := len(strs) - 1; i >= 0; i-- {
if strs[i] == str {
return i
}
}
return -1
}
// sortProcessors sorts callback processors based on their before, after, remove, and replace settings
func sortProcessors(cps []*CallbackProcessor) []*func(scope *Scope) {
var (
allNames, sortedNames []string
sortCallbackProcessor func(c *CallbackProcessor)
)
for _, cp := range cps {
// show a warning message if the callback name already exists
if index := getRIndex(allNames, cp.name); index > -1 && !cp.replace && !cp.remove {
cp.logger.Print("warning", fmt.Sprintf("[warning] duplicated callback `%v` from %v", cp.name, fileWithLineNum()))
}
allNames = append(allNames, cp.name)
}
sortCallbackProcessor = func(c *CallbackProcessor) {
if getRIndex(sortedNames, c.name) == -1 { // if not sorted
if c.before != "" { // if defined before callback
if index := getRIndex(sortedNames, c.before); index != -1 {
// if the before callback is already sorted, insert the current callback just before it
sortedNames = append(sortedNames[:index], append([]string{c.name}, sortedNames[index:]...)...)
} else if index := getRIndex(allNames, c.before); index != -1 {
// if the before callback exists but hasn't been sorted, append the current callback at the end
sortedNames = append(sortedNames, c.name)
sortCallbackProcessor(cps[index])
}
}
if c.after != "" { // if defined after callback
if index := getRIndex(sortedNames, c.after); index != -1 {
// if the after callback is already sorted, insert the current callback just after it
sortedNames = append(sortedNames[:index+1], append([]string{c.name}, sortedNames[index+1:]...)...)
} else if index := getRIndex(allNames, c.after); index != -1 {
// if the after callback exists but hasn't been sorted
cp := cps[index]
// set after callback's before callback to current callback
if cp.before == "" {
cp.before = c.name
}
sortCallbackProcessor(cp)
}
}
// if the current callback hasn't been sorted, append it at the end
if getRIndex(sortedNames, c.name) == -1 {
sortedNames = append(sortedNames, c.name)
}
}
}
for _, cp := range cps {
sortCallbackProcessor(cp)
}
var sortedFuncs []*func(scope *Scope)
for _, name := range sortedNames {
if index := getRIndex(allNames, name); !cps[index].remove {
sortedFuncs = append(sortedFuncs, cps[index].processor)
}
}
return sortedFuncs
}
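// Worked example (sketch): given processors A and B where B was registered
// with Before("A"), sortProcessors yields [B, A] regardless of registration order.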
// reorder all registered processors, and reset CRUD callbacks
func (c *Callback) reorder() {
var creates, updates, deletes, queries, rowQueries []*CallbackProcessor
for _, processor := range c.processors {
if processor.name != "" {
switch processor.kind {
case "create":
creates = append(creates, processor)
case "update":
updates = append(updates, processor)
case "delete":
deletes = append(deletes, processor)
case "query":
queries = append(queries, processor)
case "row_query":
rowQueries = append(rowQueries, processor)
}
}
}
c.creates = sortProcessors(creates)
c.updates = sortProcessors(updates)
c.deletes = sortProcessors(deletes)
c.queries = sortProcessors(queries)
c.rowQueries = sortProcessors(rowQueries)
}

View File

@ -1,197 +0,0 @@
package gorm
import (
"fmt"
"strings"
)
// Define callbacks for creating
func init() {
DefaultCallback.Create().Register("gorm:begin_transaction", beginTransactionCallback)
DefaultCallback.Create().Register("gorm:before_create", beforeCreateCallback)
DefaultCallback.Create().Register("gorm:save_before_associations", saveBeforeAssociationsCallback)
DefaultCallback.Create().Register("gorm:update_time_stamp", updateTimeStampForCreateCallback)
DefaultCallback.Create().Register("gorm:create", createCallback)
DefaultCallback.Create().Register("gorm:force_reload_after_create", forceReloadAfterCreateCallback)
DefaultCallback.Create().Register("gorm:save_after_associations", saveAfterAssociationsCallback)
DefaultCallback.Create().Register("gorm:after_create", afterCreateCallback)
DefaultCallback.Create().Register("gorm:commit_or_rollback_transaction", commitOrRollbackTransactionCallback)
}
// beforeCreateCallback will invoke the `BeforeSave` and `BeforeCreate` methods before creating
func beforeCreateCallback(scope *Scope) {
if !scope.HasError() {
scope.CallMethod("BeforeSave")
}
if !scope.HasError() {
scope.CallMethod("BeforeCreate")
}
}
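// Example (sketch; User and newUUID are hypothetical): a model hook picked up
// by the CallMethod calls above; returning a non-nil error aborts the create:
//
//	func (u *User) BeforeCreate(scope *gorm.Scope) error {
//		return scope.SetColumn("UUID", newUUID())
//	}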
// updateTimeStampForCreateCallback will set `CreatedAt` and `UpdatedAt` when creating
func updateTimeStampForCreateCallback(scope *Scope) {
if !scope.HasError() {
now := scope.db.nowFunc()
if createdAtField, ok := scope.FieldByName("CreatedAt"); ok {
if createdAtField.IsBlank {
createdAtField.Set(now)
}
}
if updatedAtField, ok := scope.FieldByName("UpdatedAt"); ok {
if updatedAtField.IsBlank {
updatedAtField.Set(now)
}
}
}
}
// createCallback is the callback used to insert data into the database
func createCallback(scope *Scope) {
if !scope.HasError() {
defer scope.trace(NowFunc())
var (
columns, placeholders []string
blankColumnsWithDefaultValue []string
)
for _, field := range scope.Fields() {
if scope.changeableField(field) {
if field.IsNormal && !field.IsIgnored {
if field.IsBlank && field.HasDefaultValue {
blankColumnsWithDefaultValue = append(blankColumnsWithDefaultValue, scope.Quote(field.DBName))
scope.InstanceSet("gorm:blank_columns_with_default_value", blankColumnsWithDefaultValue)
} else if !field.IsPrimaryKey || !field.IsBlank {
columns = append(columns, scope.Quote(field.DBName))
placeholders = append(placeholders, scope.AddToVars(field.Field.Interface()))
}
} else if field.Relationship != nil && field.Relationship.Kind == "belongs_to" {
for _, foreignKey := range field.Relationship.ForeignDBNames {
if foreignField, ok := scope.FieldByName(foreignKey); ok && !scope.changeableField(foreignField) {
columns = append(columns, scope.Quote(foreignField.DBName))
placeholders = append(placeholders, scope.AddToVars(foreignField.Field.Interface()))
}
}
}
}
}
var (
returningColumn = "*"
quotedTableName = scope.QuotedTableName()
primaryField = scope.PrimaryField()
extraOption string
insertModifier string
)
if str, ok := scope.Get("gorm:insert_option"); ok {
extraOption = fmt.Sprint(str)
}
if str, ok := scope.Get("gorm:insert_modifier"); ok {
insertModifier = strings.ToUpper(fmt.Sprint(str))
if insertModifier == "INTO" {
insertModifier = ""
}
}
if primaryField != nil {
returningColumn = scope.Quote(primaryField.DBName)
}
lastInsertIDOutputInterstitial := scope.Dialect().LastInsertIDOutputInterstitial(quotedTableName, returningColumn, columns)
var lastInsertIDReturningSuffix string
if lastInsertIDOutputInterstitial == "" {
lastInsertIDReturningSuffix = scope.Dialect().LastInsertIDReturningSuffix(quotedTableName, returningColumn)
}
if len(columns) == 0 {
scope.Raw(fmt.Sprintf(
"INSERT%v INTO %v %v%v%v",
addExtraSpaceIfExist(insertModifier),
quotedTableName,
scope.Dialect().DefaultValueStr(),
addExtraSpaceIfExist(extraOption),
addExtraSpaceIfExist(lastInsertIDReturningSuffix),
))
} else {
scope.Raw(fmt.Sprintf(
"INSERT%v INTO %v (%v)%v VALUES (%v)%v%v",
addExtraSpaceIfExist(insertModifier),
scope.QuotedTableName(),
strings.Join(columns, ","),
addExtraSpaceIfExist(lastInsertIDOutputInterstitial),
strings.Join(placeholders, ","),
addExtraSpaceIfExist(extraOption),
addExtraSpaceIfExist(lastInsertIDReturningSuffix),
))
}
// execute create sql: no primaryField
if primaryField == nil {
if result, err := scope.SQLDB().Exec(scope.SQL, scope.SQLVars...); scope.Err(err) == nil {
// set rows affected count
scope.db.RowsAffected, _ = result.RowsAffected()
// set primary value to primary field
if primaryField != nil && primaryField.IsBlank {
if primaryValue, err := result.LastInsertId(); scope.Err(err) == nil {
scope.Err(primaryField.Set(primaryValue))
}
}
}
return
}
// execute create sql: lastInsertID implementation for the majority of dialects
if lastInsertIDReturningSuffix == "" && lastInsertIDOutputInterstitial == "" {
if result, err := scope.SQLDB().Exec(scope.SQL, scope.SQLVars...); scope.Err(err) == nil {
// set rows affected count
scope.db.RowsAffected, _ = result.RowsAffected()
// set primary value to primary field
if primaryField != nil && primaryField.IsBlank {
if primaryValue, err := result.LastInsertId(); scope.Err(err) == nil {
scope.Err(primaryField.Set(primaryValue))
}
}
}
return
}
// execute create sql: dialects with additional lastInsertID requirements (currently postgres & mssql)
if primaryField.Field.CanAddr() {
if err := scope.SQLDB().QueryRow(scope.SQL, scope.SQLVars...).Scan(primaryField.Field.Addr().Interface()); scope.Err(err) == nil {
primaryField.IsBlank = false
scope.db.RowsAffected = 1
}
} else {
scope.Err(ErrUnaddressable)
}
return
}
}
// forceReloadAfterCreateCallback will reload columns that have default values, and set them back on the current object
func forceReloadAfterCreateCallback(scope *Scope) {
if blankColumnsWithDefaultValue, ok := scope.InstanceGet("gorm:blank_columns_with_default_value"); ok {
db := scope.DB().New().Table(scope.TableName()).Select(blankColumnsWithDefaultValue.([]string))
for _, field := range scope.Fields() {
if field.IsPrimaryKey && !field.IsBlank {
db = db.Where(fmt.Sprintf("%v = ?", field.DBName), field.Field.Interface())
}
}
db.Scan(scope.Value)
}
}
// afterCreateCallback will invoke the `AfterCreate` and `AfterSave` methods after creating
func afterCreateCallback(scope *Scope) {
if !scope.HasError() {
scope.CallMethod("AfterCreate")
}
if !scope.HasError() {
scope.CallMethod("AfterSave")
}
}

View File

@ -1,63 +0,0 @@
package gorm
import (
"errors"
"fmt"
)
// Define callbacks for deleting
func init() {
DefaultCallback.Delete().Register("gorm:begin_transaction", beginTransactionCallback)
DefaultCallback.Delete().Register("gorm:before_delete", beforeDeleteCallback)
DefaultCallback.Delete().Register("gorm:delete", deleteCallback)
DefaultCallback.Delete().Register("gorm:after_delete", afterDeleteCallback)
DefaultCallback.Delete().Register("gorm:commit_or_rollback_transaction", commitOrRollbackTransactionCallback)
}
// beforeDeleteCallback will invoke the `BeforeDelete` method before deleting
func beforeDeleteCallback(scope *Scope) {
if scope.DB().HasBlockGlobalUpdate() && !scope.hasConditions() {
scope.Err(errors.New("missing WHERE clause while deleting"))
return
}
if !scope.HasError() {
scope.CallMethod("BeforeDelete")
}
}
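// Example (sketch; User is hypothetical): with BlockGlobalUpdate enabled, a
// conditionless delete is rejected by the check above instead of emptying the table:
//
//	db.BlockGlobalUpdate(true).Delete(&User{}) // error: missing WHERE clause while deleting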
// deleteCallback is used to delete data from the database, or to set deleted_at to the current time (when using soft delete)
func deleteCallback(scope *Scope) {
if !scope.HasError() {
var extraOption string
if str, ok := scope.Get("gorm:delete_option"); ok {
extraOption = fmt.Sprint(str)
}
deletedAtField, hasDeletedAtField := scope.FieldByName("DeletedAt")
if !scope.Search.Unscoped && hasDeletedAtField {
scope.Raw(fmt.Sprintf(
"UPDATE %v SET %v=%v%v%v",
scope.QuotedTableName(),
scope.Quote(deletedAtField.DBName),
scope.AddToVars(scope.db.nowFunc()),
addExtraSpaceIfExist(scope.CombinedConditionSql()),
addExtraSpaceIfExist(extraOption),
)).Exec()
} else {
scope.Raw(fmt.Sprintf(
"DELETE FROM %v%v%v",
scope.QuotedTableName(),
addExtraSpaceIfExist(scope.CombinedConditionSql()),
addExtraSpaceIfExist(extraOption),
)).Exec()
}
}
}
// afterDeleteCallback will invoke the `AfterDelete` method after deleting
func afterDeleteCallback(scope *Scope) {
if !scope.HasError() {
scope.CallMethod("AfterDelete")
}
}

View File

@ -1,109 +0,0 @@
package gorm
import (
"errors"
"fmt"
"reflect"
)
// Define callbacks for querying
func init() {
DefaultCallback.Query().Register("gorm:query", queryCallback)
DefaultCallback.Query().Register("gorm:preload", preloadCallback)
DefaultCallback.Query().Register("gorm:after_query", afterQueryCallback)
}
// queryCallback is used to query data from the database
func queryCallback(scope *Scope) {
if _, skip := scope.InstanceGet("gorm:skip_query_callback"); skip {
return
}
// we are only preloading relations; don't touch the base model
if _, skip := scope.InstanceGet("gorm:only_preload"); skip {
return
}
defer scope.trace(NowFunc())
var (
isSlice, isPtr bool
resultType reflect.Type
results = scope.IndirectValue()
)
if orderBy, ok := scope.Get("gorm:order_by_primary_key"); ok {
if primaryField := scope.PrimaryField(); primaryField != nil {
scope.Search.Order(fmt.Sprintf("%v.%v %v", scope.QuotedTableName(), scope.Quote(primaryField.DBName), orderBy))
}
}
if value, ok := scope.Get("gorm:query_destination"); ok {
results = indirect(reflect.ValueOf(value))
}
if kind := results.Kind(); kind == reflect.Slice {
isSlice = true
resultType = results.Type().Elem()
results.Set(reflect.MakeSlice(results.Type(), 0, 0))
if resultType.Kind() == reflect.Ptr {
isPtr = true
resultType = resultType.Elem()
}
} else if kind != reflect.Struct {
scope.Err(errors.New("unsupported destination, should be slice or struct"))
return
}
scope.prepareQuerySQL()
if !scope.HasError() {
scope.db.RowsAffected = 0
if str, ok := scope.Get("gorm:query_hint"); ok {
scope.SQL = fmt.Sprint(str) + scope.SQL
}
if str, ok := scope.Get("gorm:query_option"); ok {
scope.SQL += addExtraSpaceIfExist(fmt.Sprint(str))
}
if rows, err := scope.SQLDB().Query(scope.SQL, scope.SQLVars...); scope.Err(err) == nil {
defer rows.Close()
columns, _ := rows.Columns()
for rows.Next() {
scope.db.RowsAffected++
elem := results
if isSlice {
elem = reflect.New(resultType).Elem()
}
scope.scan(rows, columns, scope.New(elem.Addr().Interface()).Fields())
if isSlice {
if isPtr {
results.Set(reflect.Append(results, elem.Addr()))
} else {
results.Set(reflect.Append(results, elem))
}
}
}
if err := rows.Err(); err != nil {
scope.Err(err)
} else if scope.db.RowsAffected == 0 && !isSlice {
scope.Err(ErrRecordNotFound)
}
}
}
}
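// Example (sketch): the instance settings read above are supplied per call, e.g.
//
//	db.Set("gorm:query_option", "FOR UPDATE").First(&user, 1)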
// afterQueryCallback will invoke the `AfterFind` method after querying
func afterQueryCallback(scope *Scope) {
if !scope.HasError() {
scope.CallMethod("AfterFind")
}
}

View File

@ -1,410 +0,0 @@
package gorm
import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
)
// preloadCallback is used to preload associations
func preloadCallback(scope *Scope) {
if _, skip := scope.InstanceGet("gorm:skip_query_callback"); skip {
return
}
if ap, ok := scope.Get("gorm:auto_preload"); ok {
// If gorm:auto_preload IS NOT a bool then auto preload.
// Else if it IS a bool, use the value
if apb, ok := ap.(bool); !ok {
autoPreload(scope)
} else if apb {
autoPreload(scope)
}
}
if scope.Search.preload == nil || scope.HasError() {
return
}
var (
preloadedMap = map[string]bool{}
fields = scope.Fields()
)
for _, preload := range scope.Search.preload {
var (
preloadFields = strings.Split(preload.schema, ".")
currentScope = scope
currentFields = fields
)
for idx, preloadField := range preloadFields {
var currentPreloadConditions []interface{}
if currentScope == nil {
continue
}
// if not preloaded
if preloadKey := strings.Join(preloadFields[:idx+1], "."); !preloadedMap[preloadKey] {
// assign search conditions to last preload
if idx == len(preloadFields)-1 {
currentPreloadConditions = preload.conditions
}
for _, field := range currentFields {
if field.Name != preloadField || field.Relationship == nil {
continue
}
switch field.Relationship.Kind {
case "has_one":
currentScope.handleHasOnePreload(field, currentPreloadConditions)
case "has_many":
currentScope.handleHasManyPreload(field, currentPreloadConditions)
case "belongs_to":
currentScope.handleBelongsToPreload(field, currentPreloadConditions)
case "many_to_many":
currentScope.handleManyToManyPreload(field, currentPreloadConditions)
default:
scope.Err(errors.New("unsupported relation"))
}
preloadedMap[preloadKey] = true
break
}
if !preloadedMap[preloadKey] {
scope.Err(fmt.Errorf("can't preload field %s for %s", preloadField, currentScope.GetModelStruct().ModelType))
return
}
}
// preload next level
if idx < len(preloadFields)-1 {
currentScope = currentScope.getColumnAsScope(preloadField)
if currentScope != nil {
currentFields = currentScope.Fields()
}
}
}
}
}
func autoPreload(scope *Scope) {
for _, field := range scope.Fields() {
if field.Relationship == nil {
continue
}
if val, ok := field.TagSettingsGet("PRELOAD"); ok {
if preload, err := strconv.ParseBool(val); err != nil {
scope.Err(errors.New("invalid preload option"))
return
} else if !preload {
continue
}
}
scope.Search.Preload(field.Name)
}
}
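// Example (sketch; User and Profile are hypothetical): a relation can opt out
// of auto preload via the PRELOAD tag checked above:
//
//	type User struct {
//		Profile Profile `gorm:"PRELOAD:false"`
//	}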
func (scope *Scope) generatePreloadDBWithConditions(conditions []interface{}) (*DB, []interface{}) {
var (
preloadDB = scope.NewDB()
preloadConditions []interface{}
)
for _, condition := range conditions {
if scopes, ok := condition.(func(*DB) *DB); ok {
preloadDB = scopes(preloadDB)
} else {
preloadConditions = append(preloadConditions, condition)
}
}
return preloadDB, preloadConditions
}
// handleHasOnePreload is used to preload has one associations
func (scope *Scope) handleHasOnePreload(field *Field, conditions []interface{}) {
relation := field.Relationship
// get relation's primary keys
primaryKeys := scope.getColumnAsArray(relation.AssociationForeignFieldNames, scope.Value)
if len(primaryKeys) == 0 {
return
}
// preload conditions
preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions)
// find relations
query := fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.ForeignDBNames), toQueryMarks(primaryKeys))
values := toQueryValues(primaryKeys)
if relation.PolymorphicType != "" {
query += fmt.Sprintf(" AND %v = ?", scope.Quote(relation.PolymorphicDBName))
values = append(values, relation.PolymorphicValue)
}
results := makeSlice(field.Struct.Type)
scope.Err(preloadDB.Where(query, values...).Find(results, preloadConditions...).Error)
// assign find results
var (
resultsValue = indirect(reflect.ValueOf(results))
indirectScopeValue = scope.IndirectValue()
)
if indirectScopeValue.Kind() == reflect.Slice {
foreignValuesToResults := make(map[string]reflect.Value)
for i := 0; i < resultsValue.Len(); i++ {
result := resultsValue.Index(i)
foreignValues := toString(getValueFromFields(result, relation.ForeignFieldNames))
foreignValuesToResults[foreignValues] = result
}
for j := 0; j < indirectScopeValue.Len(); j++ {
indirectValue := indirect(indirectScopeValue.Index(j))
valueString := toString(getValueFromFields(indirectValue, relation.AssociationForeignFieldNames))
if result, found := foreignValuesToResults[valueString]; found {
indirectValue.FieldByName(field.Name).Set(result)
}
}
} else {
for i := 0; i < resultsValue.Len(); i++ {
result := resultsValue.Index(i)
scope.Err(field.Set(result))
}
}
}
// handleHasManyPreload is used to preload has many associations
func (scope *Scope) handleHasManyPreload(field *Field, conditions []interface{}) {
relation := field.Relationship
// get relation's primary keys
primaryKeys := scope.getColumnAsArray(relation.AssociationForeignFieldNames, scope.Value)
if len(primaryKeys) == 0 {
return
}
// preload conditions
preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions)
// find relations
query := fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.ForeignDBNames), toQueryMarks(primaryKeys))
values := toQueryValues(primaryKeys)
if relation.PolymorphicType != "" {
query += fmt.Sprintf(" AND %v = ?", scope.Quote(relation.PolymorphicDBName))
values = append(values, relation.PolymorphicValue)
}
results := makeSlice(field.Struct.Type)
scope.Err(preloadDB.Where(query, values...).Find(results, preloadConditions...).Error)
// assign find results
var (
resultsValue = indirect(reflect.ValueOf(results))
indirectScopeValue = scope.IndirectValue()
)
if indirectScopeValue.Kind() == reflect.Slice {
preloadMap := make(map[string][]reflect.Value)
for i := 0; i < resultsValue.Len(); i++ {
result := resultsValue.Index(i)
foreignValues := getValueFromFields(result, relation.ForeignFieldNames)
preloadMap[toString(foreignValues)] = append(preloadMap[toString(foreignValues)], result)
}
for j := 0; j < indirectScopeValue.Len(); j++ {
object := indirect(indirectScopeValue.Index(j))
objectRealValue := getValueFromFields(object, relation.AssociationForeignFieldNames)
f := object.FieldByName(field.Name)
if results, ok := preloadMap[toString(objectRealValue)]; ok {
f.Set(reflect.Append(f, results...))
} else {
f.Set(reflect.MakeSlice(f.Type(), 0, 0))
}
}
} else {
scope.Err(field.Set(resultsValue))
}
}
// handleBelongsToPreload is used to preload belongs to associations
func (scope *Scope) handleBelongsToPreload(field *Field, conditions []interface{}) {
relation := field.Relationship
// preload conditions
preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions)
// get relation's primary keys
primaryKeys := scope.getColumnAsArray(relation.ForeignFieldNames, scope.Value)
if len(primaryKeys) == 0 {
return
}
// find relations
results := makeSlice(field.Struct.Type)
scope.Err(preloadDB.Where(fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.AssociationForeignDBNames), toQueryMarks(primaryKeys)), toQueryValues(primaryKeys)...).Find(results, preloadConditions...).Error)
// assign find results
var (
resultsValue = indirect(reflect.ValueOf(results))
indirectScopeValue = scope.IndirectValue()
)
foreignFieldToObjects := make(map[string][]*reflect.Value)
if indirectScopeValue.Kind() == reflect.Slice {
for j := 0; j < indirectScopeValue.Len(); j++ {
object := indirect(indirectScopeValue.Index(j))
valueString := toString(getValueFromFields(object, relation.ForeignFieldNames))
foreignFieldToObjects[valueString] = append(foreignFieldToObjects[valueString], &object)
}
}
for i := 0; i < resultsValue.Len(); i++ {
result := resultsValue.Index(i)
if indirectScopeValue.Kind() == reflect.Slice {
valueString := toString(getValueFromFields(result, relation.AssociationForeignFieldNames))
if objects, found := foreignFieldToObjects[valueString]; found {
for _, object := range objects {
object.FieldByName(field.Name).Set(result)
}
}
} else {
scope.Err(field.Set(result))
}
}
}
// handleManyToManyPreload is used to preload many to many associations
func (scope *Scope) handleManyToManyPreload(field *Field, conditions []interface{}) {
var (
relation = field.Relationship
joinTableHandler = relation.JoinTableHandler
fieldType = field.Struct.Type.Elem()
foreignKeyValue interface{}
foreignKeyType = reflect.ValueOf(&foreignKeyValue).Type()
linkHash = map[string][]reflect.Value{}
isPtr bool
)
if fieldType.Kind() == reflect.Ptr {
isPtr = true
fieldType = fieldType.Elem()
}
var sourceKeys = []string{}
for _, key := range joinTableHandler.SourceForeignKeys() {
sourceKeys = append(sourceKeys, key.DBName)
}
// preload conditions
preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions)
// generate query with join table
newScope := scope.New(reflect.New(fieldType).Interface())
preloadDB = preloadDB.Table(newScope.TableName()).Model(newScope.Value)
if len(preloadDB.search.selects) == 0 {
preloadDB = preloadDB.Select("*")
}
preloadDB = joinTableHandler.JoinWith(joinTableHandler, preloadDB, scope.Value)
// preload inline conditions
if len(preloadConditions) > 0 {
preloadDB = preloadDB.Where(preloadConditions[0], preloadConditions[1:]...)
}
rows, err := preloadDB.Rows()
if scope.Err(err) != nil {
return
}
defer rows.Close()
columns, _ := rows.Columns()
for rows.Next() {
var (
elem = reflect.New(fieldType).Elem()
fields = scope.New(elem.Addr().Interface()).Fields()
)
// register foreign keys in join tables
var joinTableFields []*Field
for _, sourceKey := range sourceKeys {
joinTableFields = append(joinTableFields, &Field{StructField: &StructField{DBName: sourceKey, IsNormal: true}, Field: reflect.New(foreignKeyType).Elem()})
}
scope.scan(rows, columns, append(fields, joinTableFields...))
scope.New(elem.Addr().Interface()).
InstanceSet("gorm:skip_query_callback", true).
callCallbacks(scope.db.parent.callbacks.queries)
var foreignKeys = make([]interface{}, len(sourceKeys))
// generate hashed foreign keys in join table
for idx, joinTableField := range joinTableFields {
if !joinTableField.Field.IsNil() {
foreignKeys[idx] = joinTableField.Field.Elem().Interface()
}
}
hashedSourceKeys := toString(foreignKeys)
if isPtr {
linkHash[hashedSourceKeys] = append(linkHash[hashedSourceKeys], elem.Addr())
} else {
linkHash[hashedSourceKeys] = append(linkHash[hashedSourceKeys], elem)
}
}
if err := rows.Err(); err != nil {
scope.Err(err)
}
// assign find results
var (
indirectScopeValue = scope.IndirectValue()
fieldsSourceMap = map[string][]reflect.Value{}
foreignFieldNames = []string{}
)
for _, dbName := range relation.ForeignFieldNames {
if field, ok := scope.FieldByName(dbName); ok {
foreignFieldNames = append(foreignFieldNames, field.Name)
}
}
if indirectScopeValue.Kind() == reflect.Slice {
for j := 0; j < indirectScopeValue.Len(); j++ {
object := indirect(indirectScopeValue.Index(j))
key := toString(getValueFromFields(object, foreignFieldNames))
fieldsSourceMap[key] = append(fieldsSourceMap[key], object.FieldByName(field.Name))
}
} else if indirectScopeValue.IsValid() {
key := toString(getValueFromFields(indirectScopeValue, foreignFieldNames))
fieldsSourceMap[key] = append(fieldsSourceMap[key], indirectScopeValue.FieldByName(field.Name))
}
for source, fields := range fieldsSourceMap {
for _, f := range fields {
// If not 0, Value is a pointer and we already added preloaded models to it
if f.Len() != 0 {
continue
}
v := reflect.MakeSlice(f.Type(), 0, 0)
if len(linkHash[source]) > 0 {
v = reflect.Append(f, linkHash[source]...)
}
f.Set(v)
}
}
}

View File

@ -1,41 +0,0 @@
package gorm
import (
"database/sql"
"fmt"
)
// Define callbacks for row query
func init() {
DefaultCallback.RowQuery().Register("gorm:row_query", rowQueryCallback)
}
type RowQueryResult struct {
Row *sql.Row
}
type RowsQueryResult struct {
Rows *sql.Rows
Error error
}
// rowQueryCallback is used to query rows from the database
func rowQueryCallback(scope *Scope) {
if result, ok := scope.InstanceGet("row_query_result"); ok {
scope.prepareQuerySQL()
if str, ok := scope.Get("gorm:query_hint"); ok {
scope.SQL = fmt.Sprint(str) + scope.SQL
}
if str, ok := scope.Get("gorm:query_option"); ok {
scope.SQL += addExtraSpaceIfExist(fmt.Sprint(str))
}
if rowResult, ok := result.(*RowQueryResult); ok {
rowResult.Row = scope.SQLDB().QueryRow(scope.SQL, scope.SQLVars...)
} else if rowsResult, ok := result.(*RowsQueryResult); ok {
rowsResult.Rows, rowsResult.Error = scope.SQLDB().Query(scope.SQL, scope.SQLVars...)
}
}
}

View File

@ -1,170 +0,0 @@
package gorm
import (
"reflect"
"strings"
)
func beginTransactionCallback(scope *Scope) {
scope.Begin()
}
func commitOrRollbackTransactionCallback(scope *Scope) {
scope.CommitOrRollback()
}
func saveAssociationCheck(scope *Scope, field *Field) (autoUpdate bool, autoCreate bool, saveReference bool, r *Relationship) {
checkTruth := func(value interface{}) bool {
if v, ok := value.(bool); ok && !v {
return false
}
if v, ok := value.(string); ok {
v = strings.ToLower(v)
return v == "true"
}
return true
}
if scope.changeableField(field) && !field.IsBlank && !field.IsIgnored {
if r = field.Relationship; r != nil {
autoUpdate, autoCreate, saveReference = true, true, true
if value, ok := scope.Get("gorm:save_associations"); ok {
autoUpdate = checkTruth(value)
autoCreate = autoUpdate
saveReference = autoUpdate
} else if value, ok := field.TagSettingsGet("SAVE_ASSOCIATIONS"); ok {
autoUpdate = checkTruth(value)
autoCreate = autoUpdate
saveReference = autoUpdate
}
if value, ok := scope.Get("gorm:association_autoupdate"); ok {
autoUpdate = checkTruth(value)
} else if value, ok := field.TagSettingsGet("ASSOCIATION_AUTOUPDATE"); ok {
autoUpdate = checkTruth(value)
}
if value, ok := scope.Get("gorm:association_autocreate"); ok {
autoCreate = checkTruth(value)
} else if value, ok := field.TagSettingsGet("ASSOCIATION_AUTOCREATE"); ok {
autoCreate = checkTruth(value)
}
if value, ok := scope.Get("gorm:association_save_reference"); ok {
saveReference = checkTruth(value)
} else if value, ok := field.TagSettingsGet("ASSOCIATION_SAVE_REFERENCE"); ok {
saveReference = checkTruth(value)
}
}
}
return
}
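// Example (sketch; user and Company are hypothetical): the settings consulted
// above come either from a per-call setting or from a field tag:
//
//	db.Set("gorm:association_autoupdate", false).Save(&user)
//	// or on the field: Company Company `gorm:"association_autoupdate:false"`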
func saveBeforeAssociationsCallback(scope *Scope) {
for _, field := range scope.Fields() {
autoUpdate, autoCreate, saveReference, relationship := saveAssociationCheck(scope, field)
if relationship != nil && relationship.Kind == "belongs_to" {
fieldValue := field.Field.Addr().Interface()
newScope := scope.New(fieldValue)
if newScope.PrimaryKeyZero() {
if autoCreate {
scope.Err(scope.NewDB().Save(fieldValue).Error)
}
} else if autoUpdate {
scope.Err(scope.NewDB().Save(fieldValue).Error)
}
if saveReference {
if len(relationship.ForeignFieldNames) != 0 {
// set value's foreign key
for idx, fieldName := range relationship.ForeignFieldNames {
associationForeignName := relationship.AssociationForeignDBNames[idx]
if foreignField, ok := scope.New(fieldValue).FieldByName(associationForeignName); ok {
scope.Err(scope.SetColumn(fieldName, foreignField.Field.Interface()))
}
}
}
}
}
}
}
func saveAfterAssociationsCallback(scope *Scope) {
for _, field := range scope.Fields() {
autoUpdate, autoCreate, saveReference, relationship := saveAssociationCheck(scope, field)
if relationship != nil && (relationship.Kind == "has_one" || relationship.Kind == "has_many" || relationship.Kind == "many_to_many") {
value := field.Field
switch value.Kind() {
case reflect.Slice:
for i := 0; i < value.Len(); i++ {
newDB := scope.NewDB()
elem := value.Index(i).Addr().Interface()
newScope := newDB.NewScope(elem)
if saveReference {
if relationship.JoinTableHandler == nil && len(relationship.ForeignFieldNames) != 0 {
for idx, fieldName := range relationship.ForeignFieldNames {
associationForeignName := relationship.AssociationForeignDBNames[idx]
if f, ok := scope.FieldByName(associationForeignName); ok {
scope.Err(newScope.SetColumn(fieldName, f.Field.Interface()))
}
}
}
if relationship.PolymorphicType != "" {
scope.Err(newScope.SetColumn(relationship.PolymorphicType, relationship.PolymorphicValue))
}
}
if newScope.PrimaryKeyZero() {
if autoCreate {
scope.Err(newDB.Save(elem).Error)
}
} else if autoUpdate {
scope.Err(newDB.Save(elem).Error)
}
if !scope.New(newScope.Value).PrimaryKeyZero() && saveReference {
if joinTableHandler := relationship.JoinTableHandler; joinTableHandler != nil {
scope.Err(joinTableHandler.Add(joinTableHandler, newDB, scope.Value, newScope.Value))
}
}
}
default:
elem := value.Addr().Interface()
newScope := scope.New(elem)
if saveReference {
if len(relationship.ForeignFieldNames) != 0 {
for idx, fieldName := range relationship.ForeignFieldNames {
associationForeignName := relationship.AssociationForeignDBNames[idx]
if f, ok := scope.FieldByName(associationForeignName); ok {
scope.Err(newScope.SetColumn(fieldName, f.Field.Interface()))
}
}
}
if relationship.PolymorphicType != "" {
scope.Err(newScope.SetColumn(relationship.PolymorphicType, relationship.PolymorphicValue))
}
}
if newScope.PrimaryKeyZero() {
if autoCreate {
scope.Err(scope.NewDB().Save(elem).Error)
}
} else if autoUpdate {
scope.Err(scope.NewDB().Save(elem).Error)
}
}
}
}
}

View File

@ -1,121 +0,0 @@
package gorm
import (
"errors"
"fmt"
"sort"
"strings"
)
// Define callbacks for updating
func init() {
DefaultCallback.Update().Register("gorm:assign_updating_attributes", assignUpdatingAttributesCallback)
DefaultCallback.Update().Register("gorm:begin_transaction", beginTransactionCallback)
DefaultCallback.Update().Register("gorm:before_update", beforeUpdateCallback)
DefaultCallback.Update().Register("gorm:save_before_associations", saveBeforeAssociationsCallback)
DefaultCallback.Update().Register("gorm:update_time_stamp", updateTimeStampForUpdateCallback)
DefaultCallback.Update().Register("gorm:update", updateCallback)
DefaultCallback.Update().Register("gorm:save_after_associations", saveAfterAssociationsCallback)
DefaultCallback.Update().Register("gorm:after_update", afterUpdateCallback)
DefaultCallback.Update().Register("gorm:commit_or_rollback_transaction", commitOrRollbackTransactionCallback)
}
// assignUpdatingAttributesCallback assigns updating attributes to the model
func assignUpdatingAttributesCallback(scope *Scope) {
if attrs, ok := scope.InstanceGet("gorm:update_interface"); ok {
if updateMaps, hasUpdate := scope.updatedAttrsWithValues(attrs); hasUpdate {
scope.InstanceSet("gorm:update_attrs", updateMaps)
} else {
scope.SkipLeft()
}
}
}
// beforeUpdateCallback will invoke the `BeforeSave` and `BeforeUpdate` methods before updating
func beforeUpdateCallback(scope *Scope) {
if scope.DB().HasBlockGlobalUpdate() && !scope.hasConditions() {
scope.Err(errors.New("missing WHERE clause while updating"))
return
}
if _, ok := scope.Get("gorm:update_column"); !ok {
if !scope.HasError() {
scope.CallMethod("BeforeSave")
}
if !scope.HasError() {
scope.CallMethod("BeforeUpdate")
}
}
}
// updateTimeStampForUpdateCallback will set `UpdatedAt` when updating
func updateTimeStampForUpdateCallback(scope *Scope) {
if _, ok := scope.Get("gorm:update_column"); !ok {
scope.SetColumn("UpdatedAt", scope.db.nowFunc())
}
}
// updateCallback is the callback used to update data in the database
func updateCallback(scope *Scope) {
if !scope.HasError() {
var sqls []string
if updateAttrs, ok := scope.InstanceGet("gorm:update_attrs"); ok {
// Sort the column names so that the generated SQL is the same every time.
updateMap := updateAttrs.(map[string]interface{})
var columns []string
for c := range updateMap {
columns = append(columns, c)
}
sort.Strings(columns)
for _, column := range columns {
value := updateMap[column]
sqls = append(sqls, fmt.Sprintf("%v = %v", scope.Quote(column), scope.AddToVars(value)))
}
} else {
for _, field := range scope.Fields() {
if scope.changeableField(field) {
if !field.IsPrimaryKey && field.IsNormal && (field.Name != "CreatedAt" || !field.IsBlank) {
if !field.IsForeignKey || !field.IsBlank || !field.HasDefaultValue {
sqls = append(sqls, fmt.Sprintf("%v = %v", scope.Quote(field.DBName), scope.AddToVars(field.Field.Interface())))
}
} else if relationship := field.Relationship; relationship != nil && relationship.Kind == "belongs_to" {
for _, foreignKey := range relationship.ForeignDBNames {
if foreignField, ok := scope.FieldByName(foreignKey); ok && !scope.changeableField(foreignField) {
sqls = append(sqls,
fmt.Sprintf("%v = %v", scope.Quote(foreignField.DBName), scope.AddToVars(foreignField.Field.Interface())))
}
}
}
}
}
}
var extraOption string
if str, ok := scope.Get("gorm:update_option"); ok {
extraOption = fmt.Sprint(str)
}
if len(sqls) > 0 {
scope.Raw(fmt.Sprintf(
"UPDATE %v SET %v%v%v",
scope.QuotedTableName(),
strings.Join(sqls, ", "),
addExtraSpaceIfExist(scope.CombinedConditionSql()),
addExtraSpaceIfExist(extraOption),
)).Exec()
}
}
}
// afterUpdateCallback will invoke the `AfterUpdate` and `AfterSave` methods after updating
func afterUpdateCallback(scope *Scope) {
if _, ok := scope.Get("gorm:update_column"); !ok {
if !scope.HasError() {
scope.CallMethod("AfterUpdate")
}
if !scope.HasError() {
scope.CallMethod("AfterSave")
}
}
}

View File

@ -1,147 +0,0 @@
package gorm
import (
"database/sql"
"fmt"
"reflect"
"strconv"
"strings"
)
// Dialect interface contains behaviors that differ across SQL databases
type Dialect interface {
// GetName gets the dialect's name
GetName() string
// SetDB sets the db for the dialect
SetDB(db SQLCommon)
// BindVar returns the placeholder for actual values in SQL statements; in many dbs it is "?", while Postgres uses $1, $2, ...
BindVar(i int) string
// Quote quotes a field name to avoid SQL parsing exceptions when a reserved word is used as a field name
Quote(key string) string
// DataTypeOf returns the field's SQL data type
DataTypeOf(field *StructField) string
// HasIndex checks whether the index exists
HasIndex(tableName string, indexName string) bool
// HasForeignKey checks whether the foreign key exists
HasForeignKey(tableName string, foreignKeyName string) bool
// RemoveIndex removes the index
RemoveIndex(tableName string, indexName string) error
// HasTable checks whether the table exists
HasTable(tableName string) bool
// HasColumn checks whether the column exists
HasColumn(tableName string, columnName string) bool
// ModifyColumn modifies the column's type
ModifyColumn(tableName string, columnName string, typ string) error
// LimitAndOffsetSQL returns generated SQL with LIMIT and OFFSET, as mssql is a special case
LimitAndOffsetSQL(limit, offset interface{}) (string, error)
// SelectFromDummyTable returns a clause for selecting literal values; for most dbs `SELECT values` just works, but mysql needs `SELECT value FROM DUAL`
SelectFromDummyTable() string
// LastInsertIDOutputInterstitial returns the clause placed between the column list and VALUES for retrieving the last insert ID; most dbs support LastInsertId, but mssql needs to use `OUTPUT`
LastInsertIDOutputInterstitial(tableName, columnName string, columns []string) string
// LastInsertIdReturningSuffix most dbs support LastInsertId, but postgres needs to use `RETURNING`
LastInsertIDReturningSuffix(tableName, columnName string) string
// DefaultValueStr
DefaultValueStr() string
// BuildKeyName returns a valid key name (foreign key, index key) for the given table, field and reference
BuildKeyName(kind, tableName string, fields ...string) string
// NormalizeIndexAndColumn returns valid index name and column name depending on each dialect
NormalizeIndexAndColumn(indexName, columnName string) (string, string)
// CurrentDatabase return current database name
CurrentDatabase() string
}
var dialectsMap = map[string]Dialect{}
func newDialect(name string, db SQLCommon) Dialect {
if value, ok := dialectsMap[name]; ok {
dialect := reflect.New(reflect.TypeOf(value).Elem()).Interface().(Dialect)
dialect.SetDB(db)
return dialect
}
fmt.Printf("`%v` is not officially supported, running under compatibility mode.\n", name)
	dialect := &commonDialect{}
	dialect.SetDB(db)
	return dialect
}
// RegisterDialect registers a new dialect
func RegisterDialect(name string, dialect Dialect) {
dialectsMap[name] = dialect
}
// GetDialect gets the dialect for the specified dialect name
func GetDialect(name string) (dialect Dialect, ok bool) {
dialect, ok = dialectsMap[name]
return
}
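// exampleDialect is an illustrative sketch, not part of the original file: a
// user-defined dialect can embed commonDialect, override what differs, and hook
// itself in through RegisterDialect, just as the concrete dialects below do in
// their init functions. The name "exampledb" is hypothetical.
type exampleDialect struct {
	commonDialect
}

func (exampleDialect) GetName() string { return "exampledb" }

func init() {
	RegisterDialect("exampledb", &exampleDialect{})
}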
// ParseFieldStructForDialect get field's sql data type
var ParseFieldStructForDialect = func(field *StructField, dialect Dialect) (fieldValue reflect.Value, sqlType string, size int, additionalType string) {
// Get redirected field type
var (
reflectType = field.Struct.Type
dataType, _ = field.TagSettingsGet("TYPE")
)
for reflectType.Kind() == reflect.Ptr {
reflectType = reflectType.Elem()
}
// Get redirected field value
fieldValue = reflect.Indirect(reflect.New(reflectType))
if gormDataType, ok := fieldValue.Interface().(interface {
GormDataType(Dialect) string
}); ok {
dataType = gormDataType.GormDataType(dialect)
}
// Get scanner's real value
if dataType == "" {
var getScannerValue func(reflect.Value)
getScannerValue = func(value reflect.Value) {
fieldValue = value
if _, isScanner := reflect.New(fieldValue.Type()).Interface().(sql.Scanner); isScanner && fieldValue.Kind() == reflect.Struct {
getScannerValue(fieldValue.Field(0))
}
}
getScannerValue(fieldValue)
}
// Default Size
if num, ok := field.TagSettingsGet("SIZE"); ok {
size, _ = strconv.Atoi(num)
} else {
size = 255
}
// Default type from tag setting
notNull, _ := field.TagSettingsGet("NOT NULL")
unique, _ := field.TagSettingsGet("UNIQUE")
additionalType = notNull + " " + unique
if value, ok := field.TagSettingsGet("DEFAULT"); ok {
additionalType = additionalType + " DEFAULT " + value
}
if value, ok := field.TagSettingsGet("COMMENT"); ok {
additionalType = additionalType + " COMMENT " + value
}
return fieldValue, dataType, size, strings.TrimSpace(additionalType)
}
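// Illustrative only (hypothetical model field): for a field declared as
//
//	Name string `sql:"SIZE:64;NOT NULL;DEFAULT:'unknown'"`
//
// ParseFieldStructForDialect reports size 64 and an additionalType built from
// the NOT NULL and DEFAULT settings; with no TYPE tag, sqlType stays empty and
// the dialect's DataTypeOf picks the concrete column type.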
func currentDatabaseAndTable(dialect Dialect, tableName string) (string, string) {
if strings.Contains(tableName, ".") {
splitStrings := strings.SplitN(tableName, ".", 2)
return splitStrings[0], splitStrings[1]
}
return dialect.CurrentDatabase(), tableName
}


@ -1,196 +0,0 @@
package gorm
import (
"fmt"
"reflect"
"regexp"
"strconv"
"strings"
"time"
)
var keyNameRegex = regexp.MustCompile("[^a-zA-Z0-9]+")
// DefaultForeignKeyNamer contains the default foreign key name generator method
type DefaultForeignKeyNamer struct {
}
type commonDialect struct {
db SQLCommon
DefaultForeignKeyNamer
}
func init() {
RegisterDialect("common", &commonDialect{})
}
func (commonDialect) GetName() string {
return "common"
}
func (s *commonDialect) SetDB(db SQLCommon) {
s.db = db
}
func (commonDialect) BindVar(i int) string {
return "$$$" // ?
}
func (commonDialect) Quote(key string) string {
return fmt.Sprintf(`"%s"`, key)
}
func (s *commonDialect) fieldCanAutoIncrement(field *StructField) bool {
if value, ok := field.TagSettingsGet("AUTO_INCREMENT"); ok {
return strings.ToLower(value) != "false"
}
return field.IsPrimaryKey
}
func (s *commonDialect) DataTypeOf(field *StructField) string {
var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s)
if sqlType == "" {
switch dataValue.Kind() {
case reflect.Bool:
sqlType = "BOOLEAN"
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr:
if s.fieldCanAutoIncrement(field) {
sqlType = "INTEGER AUTO_INCREMENT"
} else {
sqlType = "INTEGER"
}
case reflect.Int64, reflect.Uint64:
if s.fieldCanAutoIncrement(field) {
sqlType = "BIGINT AUTO_INCREMENT"
} else {
sqlType = "BIGINT"
}
case reflect.Float32, reflect.Float64:
sqlType = "FLOAT"
case reflect.String:
if size > 0 && size < 65532 {
sqlType = fmt.Sprintf("VARCHAR(%d)", size)
} else {
sqlType = "VARCHAR(65532)"
}
case reflect.Struct:
if _, ok := dataValue.Interface().(time.Time); ok {
sqlType = "TIMESTAMP"
}
default:
if _, ok := dataValue.Interface().([]byte); ok {
if size > 0 && size < 65532 {
sqlType = fmt.Sprintf("BINARY(%d)", size)
} else {
sqlType = "BINARY(65532)"
}
}
}
}
if sqlType == "" {
panic(fmt.Sprintf("invalid sql type %s (%s) for commonDialect", dataValue.Type().Name(), dataValue.Kind().String()))
}
if strings.TrimSpace(additionalType) == "" {
return sqlType
}
return fmt.Sprintf("%v %v", sqlType, additionalType)
}
func (s commonDialect) HasIndex(tableName string, indexName string) bool {
var count int
currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.STATISTICS WHERE table_schema = ? AND table_name = ? AND index_name = ?", currentDatabase, tableName, indexName).Scan(&count)
return count > 0
}
func (s commonDialect) RemoveIndex(tableName string, indexName string) error {
_, err := s.db.Exec(fmt.Sprintf("DROP INDEX %v", indexName))
return err
}
func (s commonDialect) HasForeignKey(tableName string, foreignKeyName string) bool {
return false
}
func (s commonDialect) HasTable(tableName string) bool {
var count int
currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = ? AND table_name = ?", currentDatabase, tableName).Scan(&count)
return count > 0
}
func (s commonDialect) HasColumn(tableName string, columnName string) bool {
var count int
currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = ? AND table_name = ? AND column_name = ?", currentDatabase, tableName, columnName).Scan(&count)
return count > 0
}
func (s commonDialect) ModifyColumn(tableName string, columnName string, typ string) error {
_, err := s.db.Exec(fmt.Sprintf("ALTER TABLE %v ALTER COLUMN %v TYPE %v", tableName, columnName, typ))
return err
}
func (s commonDialect) CurrentDatabase() (name string) {
s.db.QueryRow("SELECT DATABASE()").Scan(&name)
return
}
// LimitAndOffsetSQL return generated SQL with Limit and Offset
func (s commonDialect) LimitAndOffsetSQL(limit, offset interface{}) (sql string, err error) {
if limit != nil {
if parsedLimit, err := s.parseInt(limit); err != nil {
return "", err
} else if parsedLimit >= 0 {
sql += fmt.Sprintf(" LIMIT %d", parsedLimit)
}
}
if offset != nil {
if parsedOffset, err := s.parseInt(offset); err != nil {
return "", err
} else if parsedOffset >= 0 {
sql += fmt.Sprintf(" OFFSET %d", parsedOffset)
}
}
return
}
func (commonDialect) SelectFromDummyTable() string {
return ""
}
func (commonDialect) LastInsertIDOutputInterstitial(tableName, columnName string, columns []string) string {
return ""
}
func (commonDialect) LastInsertIDReturningSuffix(tableName, columnName string) string {
return ""
}
func (commonDialect) DefaultValueStr() string {
return "DEFAULT VALUES"
}
// BuildKeyName returns a valid key name (foreign key, index key) for the given table, field and reference
func (DefaultForeignKeyNamer) BuildKeyName(kind, tableName string, fields ...string) string {
keyName := fmt.Sprintf("%s_%s_%s", kind, tableName, strings.Join(fields, "_"))
keyName = keyNameRegex.ReplaceAllString(keyName, "_")
return keyName
}
// NormalizeIndexAndColumn returns the given index name and column name unchanged
func (commonDialect) NormalizeIndexAndColumn(indexName, columnName string) (string, string) {
return indexName, columnName
}
func (commonDialect) parseInt(value interface{}) (int64, error) {
return strconv.ParseInt(fmt.Sprint(value), 0, 0)
}
// IsByteArrayOrSlice returns true if the reflected value is a byte array or slice
func IsByteArrayOrSlice(value reflect.Value) bool {
return (value.Kind() == reflect.Array || value.Kind() == reflect.Slice) && value.Type().Elem() == reflect.TypeOf(uint8(0))
}
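// Illustrative mapping (hypothetical model, not part of the original file):
// with commonDialect, DataTypeOf above resolves
//
//	ID   uint   `gorm:"primary_key"` // -> "INTEGER AUTO_INCREMENT"
//	Name string                      // -> "VARCHAR(255)" (default size 255)
//	Blob []byte `sql:"SIZE:512"`     // -> "BINARY(512)"
//
// via the kind switch and the size defaults from ParseFieldStructForDialect.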


@ -1,246 +0,0 @@
package gorm
import (
"crypto/sha1"
"database/sql"
"fmt"
"reflect"
"regexp"
"strings"
"time"
"unicode/utf8"
)
var mysqlIndexRegex = regexp.MustCompile(`^(.+)\((\d+)\)$`)
type mysql struct {
commonDialect
}
func init() {
RegisterDialect("mysql", &mysql{})
}
func (mysql) GetName() string {
return "mysql"
}
func (mysql) Quote(key string) string {
return fmt.Sprintf("`%s`", key)
}
// Get Data Type for MySQL Dialect
func (s *mysql) DataTypeOf(field *StructField) string {
var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s)
// MySQL allows only one auto increment column per table, and it must
// be a KEY column.
if _, ok := field.TagSettingsGet("AUTO_INCREMENT"); ok {
if _, ok = field.TagSettingsGet("INDEX"); !ok && !field.IsPrimaryKey {
field.TagSettingsDelete("AUTO_INCREMENT")
}
}
if sqlType == "" {
switch dataValue.Kind() {
case reflect.Bool:
sqlType = "boolean"
case reflect.Int8:
if s.fieldCanAutoIncrement(field) {
field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
sqlType = "tinyint AUTO_INCREMENT"
} else {
sqlType = "tinyint"
}
case reflect.Int, reflect.Int16, reflect.Int32:
if s.fieldCanAutoIncrement(field) {
field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
sqlType = "int AUTO_INCREMENT"
} else {
sqlType = "int"
}
case reflect.Uint8:
if s.fieldCanAutoIncrement(field) {
field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
sqlType = "tinyint unsigned AUTO_INCREMENT"
} else {
sqlType = "tinyint unsigned"
}
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uintptr:
if s.fieldCanAutoIncrement(field) {
field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
sqlType = "int unsigned AUTO_INCREMENT"
} else {
sqlType = "int unsigned"
}
case reflect.Int64:
if s.fieldCanAutoIncrement(field) {
field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
sqlType = "bigint AUTO_INCREMENT"
} else {
sqlType = "bigint"
}
case reflect.Uint64:
if s.fieldCanAutoIncrement(field) {
field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
sqlType = "bigint unsigned AUTO_INCREMENT"
} else {
sqlType = "bigint unsigned"
}
case reflect.Float32, reflect.Float64:
sqlType = "double"
case reflect.String:
if size > 0 && size < 65532 {
sqlType = fmt.Sprintf("varchar(%d)", size)
} else {
sqlType = "longtext"
}
case reflect.Struct:
if _, ok := dataValue.Interface().(time.Time); ok {
precision := ""
if p, ok := field.TagSettingsGet("PRECISION"); ok {
precision = fmt.Sprintf("(%s)", p)
}
if _, ok := field.TagSettings["NOT NULL"]; ok || field.IsPrimaryKey {
sqlType = fmt.Sprintf("DATETIME%v", precision)
} else {
sqlType = fmt.Sprintf("DATETIME%v NULL", precision)
}
}
default:
if IsByteArrayOrSlice(dataValue) {
if size > 0 && size < 65532 {
sqlType = fmt.Sprintf("varbinary(%d)", size)
} else {
sqlType = "longblob"
}
}
}
}
if sqlType == "" {
panic(fmt.Sprintf("invalid sql type %s (%s) in field %s for mysql", dataValue.Type().Name(), dataValue.Kind().String(), field.Name))
}
if strings.TrimSpace(additionalType) == "" {
return sqlType
}
return fmt.Sprintf("%v %v", sqlType, additionalType)
}
func (s mysql) RemoveIndex(tableName string, indexName string) error {
_, err := s.db.Exec(fmt.Sprintf("DROP INDEX %v ON %v", indexName, s.Quote(tableName)))
return err
}
func (s mysql) ModifyColumn(tableName string, columnName string, typ string) error {
_, err := s.db.Exec(fmt.Sprintf("ALTER TABLE %v MODIFY COLUMN %v %v", tableName, columnName, typ))
return err
}
func (s mysql) LimitAndOffsetSQL(limit, offset interface{}) (sql string, err error) {
if limit != nil {
parsedLimit, err := s.parseInt(limit)
if err != nil {
return "", err
}
if parsedLimit >= 0 {
sql += fmt.Sprintf(" LIMIT %d", parsedLimit)
if offset != nil {
parsedOffset, err := s.parseInt(offset)
if err != nil {
return "", err
}
if parsedOffset >= 0 {
sql += fmt.Sprintf(" OFFSET %d", parsedOffset)
}
}
}
}
return
}
func (s mysql) HasForeignKey(tableName string, foreignKeyName string) bool {
var count int
currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_SCHEMA=? AND TABLE_NAME=? AND CONSTRAINT_NAME=? AND CONSTRAINT_TYPE='FOREIGN KEY'", currentDatabase, tableName, foreignKeyName).Scan(&count)
return count > 0
}
func (s mysql) HasTable(tableName string) bool {
currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
var name string
// allow mysql database name with '-' character
if err := s.db.QueryRow(fmt.Sprintf("SHOW TABLES FROM `%s` WHERE `Tables_in_%s` = ?", currentDatabase, currentDatabase), tableName).Scan(&name); err != nil {
if err == sql.ErrNoRows {
return false
}
panic(err)
} else {
return true
}
}
func (s mysql) HasIndex(tableName string, indexName string) bool {
currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
if rows, err := s.db.Query(fmt.Sprintf("SHOW INDEXES FROM `%s` FROM `%s` WHERE Key_name = ?", tableName, currentDatabase), indexName); err != nil {
panic(err)
} else {
defer rows.Close()
return rows.Next()
}
}
func (s mysql) HasColumn(tableName string, columnName string) bool {
currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
if rows, err := s.db.Query(fmt.Sprintf("SHOW COLUMNS FROM `%s` FROM `%s` WHERE Field = ?", tableName, currentDatabase), columnName); err != nil {
panic(err)
} else {
defer rows.Close()
return rows.Next()
}
}
func (s mysql) CurrentDatabase() (name string) {
s.db.QueryRow("SELECT DATABASE()").Scan(&name)
return
}
func (mysql) SelectFromDummyTable() string {
return "FROM DUAL"
}
func (s mysql) BuildKeyName(kind, tableName string, fields ...string) string {
keyName := s.commonDialect.BuildKeyName(kind, tableName, fields...)
if utf8.RuneCountInString(keyName) <= 64 {
return keyName
}
h := sha1.New()
h.Write([]byte(keyName))
bs := h.Sum(nil)
	// the sha1 hex digest is 40 characters; keep at most the first 24 characters of the field name so the key stays within MySQL's 64-character identifier limit
destRunes := []rune(keyNameRegex.ReplaceAllString(fields[0], "_"))
if len(destRunes) > 24 {
destRunes = destRunes[:24]
}
return fmt.Sprintf("%s%x", string(destRunes), bs)
}
// NormalizeIndexAndColumn returns the index name and column name, moving a MySQL index prefix length onto the column when one is specified
func (mysql) NormalizeIndexAndColumn(indexName, columnName string) (string, string) {
submatch := mysqlIndexRegex.FindStringSubmatch(indexName)
if len(submatch) != 3 {
return indexName, columnName
}
indexName = submatch[1]
columnName = fmt.Sprintf("%s(%s)", columnName, submatch[2])
return indexName, columnName
}
func (mysql) DefaultValueStr() string {
return "VALUES()"
}
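// Illustrative, not part of the original file: NormalizeIndexAndColumn above
// moves a MySQL index prefix length from the index name onto the column, e.g.
//
//	NormalizeIndexAndColumn("idx_code(10)", "code") // -> "idx_code", "code(10)"
//
// while BuildKeyName falls back to a sha1-based name so generated keys stay
// within MySQL's 64-character identifier limit.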


@ -1,147 +0,0 @@
package gorm
import (
"encoding/json"
"fmt"
"reflect"
"strings"
"time"
)
type postgres struct {
commonDialect
}
func init() {
RegisterDialect("postgres", &postgres{})
RegisterDialect("cloudsqlpostgres", &postgres{})
}
func (postgres) GetName() string {
return "postgres"
}
func (postgres) BindVar(i int) string {
return fmt.Sprintf("$%v", i)
}
func (s *postgres) DataTypeOf(field *StructField) string {
var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s)
if sqlType == "" {
switch dataValue.Kind() {
case reflect.Bool:
sqlType = "boolean"
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uintptr:
if s.fieldCanAutoIncrement(field) {
field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
sqlType = "serial"
} else {
sqlType = "integer"
}
case reflect.Int64, reflect.Uint32, reflect.Uint64:
if s.fieldCanAutoIncrement(field) {
field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
sqlType = "bigserial"
} else {
sqlType = "bigint"
}
case reflect.Float32, reflect.Float64:
sqlType = "numeric"
case reflect.String:
if _, ok := field.TagSettingsGet("SIZE"); !ok {
				size = 0 // if SIZE hasn't been set, use `text` as the default type, as there is no performance difference
}
if size > 0 && size < 65532 {
sqlType = fmt.Sprintf("varchar(%d)", size)
} else {
sqlType = "text"
}
case reflect.Struct:
if _, ok := dataValue.Interface().(time.Time); ok {
sqlType = "timestamp with time zone"
}
case reflect.Map:
if dataValue.Type().Name() == "Hstore" {
sqlType = "hstore"
}
default:
if IsByteArrayOrSlice(dataValue) {
sqlType = "bytea"
if isUUID(dataValue) {
sqlType = "uuid"
}
if isJSON(dataValue) {
sqlType = "jsonb"
}
}
}
}
if sqlType == "" {
panic(fmt.Sprintf("invalid sql type %s (%s) for postgres", dataValue.Type().Name(), dataValue.Kind().String()))
}
if strings.TrimSpace(additionalType) == "" {
return sqlType
}
return fmt.Sprintf("%v %v", sqlType, additionalType)
}
func (s postgres) HasIndex(tableName string, indexName string) bool {
var count int
s.db.QueryRow("SELECT count(*) FROM pg_indexes WHERE tablename = $1 AND indexname = $2 AND schemaname = CURRENT_SCHEMA()", tableName, indexName).Scan(&count)
return count > 0
}
func (s postgres) HasForeignKey(tableName string, foreignKeyName string) bool {
var count int
s.db.QueryRow("SELECT count(con.conname) FROM pg_constraint con WHERE $1::regclass::oid = con.conrelid AND con.conname = $2 AND con.contype='f'", tableName, foreignKeyName).Scan(&count)
return count > 0
}
func (s postgres) HasTable(tableName string) bool {
var count int
s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.tables WHERE table_name = $1 AND table_type = 'BASE TABLE' AND table_schema = CURRENT_SCHEMA()", tableName).Scan(&count)
return count > 0
}
func (s postgres) HasColumn(tableName string, columnName string) bool {
var count int
s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.columns WHERE table_name = $1 AND column_name = $2 AND table_schema = CURRENT_SCHEMA()", tableName, columnName).Scan(&count)
return count > 0
}
func (s postgres) CurrentDatabase() (name string) {
s.db.QueryRow("SELECT CURRENT_DATABASE()").Scan(&name)
return
}
func (s postgres) LastInsertIDOutputInterstitial(tableName, key string, columns []string) string {
return ""
}
func (s postgres) LastInsertIDReturningSuffix(tableName, key string) string {
return fmt.Sprintf("RETURNING %v.%v", tableName, key)
}
func (postgres) SupportLastInsertID() bool {
return false
}
func isUUID(value reflect.Value) bool {
if value.Kind() != reflect.Array || value.Type().Len() != 16 {
return false
}
typename := value.Type().Name()
lower := strings.ToLower(typename)
return "uuid" == lower || "guid" == lower
}
func isJSON(value reflect.Value) bool {
_, ok := value.Interface().(json.RawMessage)
return ok
}
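// Illustrative (hypothetical types, not part of the original file): the two
// helpers above drive the postgres-specific column mapping, e.g.
//
//	type UUID [16]byte         // satisfies isUUID -> column type "uuid"
//	type Doc = json.RawMessage // satisfies isJSON -> column type "jsonb"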


@ -1,107 +0,0 @@
package gorm
import (
"fmt"
"reflect"
"strings"
"time"
)
type sqlite3 struct {
commonDialect
}
func init() {
RegisterDialect("sqlite3", &sqlite3{})
}
func (sqlite3) GetName() string {
return "sqlite3"
}
// Get Data Type for Sqlite Dialect
func (s *sqlite3) DataTypeOf(field *StructField) string {
var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s)
if sqlType == "" {
switch dataValue.Kind() {
case reflect.Bool:
sqlType = "bool"
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr:
if s.fieldCanAutoIncrement(field) {
field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
sqlType = "integer primary key autoincrement"
} else {
sqlType = "integer"
}
case reflect.Int64, reflect.Uint64:
if s.fieldCanAutoIncrement(field) {
field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
sqlType = "integer primary key autoincrement"
} else {
sqlType = "bigint"
}
case reflect.Float32, reflect.Float64:
sqlType = "real"
case reflect.String:
if size > 0 && size < 65532 {
sqlType = fmt.Sprintf("varchar(%d)", size)
} else {
sqlType = "text"
}
case reflect.Struct:
if _, ok := dataValue.Interface().(time.Time); ok {
sqlType = "datetime"
}
default:
if IsByteArrayOrSlice(dataValue) {
sqlType = "blob"
}
}
}
if sqlType == "" {
panic(fmt.Sprintf("invalid sql type %s (%s) for sqlite3", dataValue.Type().Name(), dataValue.Kind().String()))
}
if strings.TrimSpace(additionalType) == "" {
return sqlType
}
return fmt.Sprintf("%v %v", sqlType, additionalType)
}
func (s sqlite3) HasIndex(tableName string, indexName string) bool {
var count int
s.db.QueryRow(fmt.Sprintf("SELECT count(*) FROM sqlite_master WHERE tbl_name = ? AND sql LIKE '%%INDEX %v ON%%'", indexName), tableName).Scan(&count)
return count > 0
}
func (s sqlite3) HasTable(tableName string) bool {
var count int
s.db.QueryRow("SELECT count(*) FROM sqlite_master WHERE type='table' AND name=?", tableName).Scan(&count)
return count > 0
}
func (s sqlite3) HasColumn(tableName string, columnName string) bool {
var count int
s.db.QueryRow(fmt.Sprintf("SELECT count(*) FROM sqlite_master WHERE tbl_name = ? AND (sql LIKE '%%\"%v\" %%' OR sql LIKE '%%%v %%');\n", columnName, columnName), tableName).Scan(&count)
return count > 0
}
func (s sqlite3) CurrentDatabase() (name string) {
var (
ifaces = make([]interface{}, 3)
pointers = make([]*string, 3)
i int
)
for i = 0; i < 3; i++ {
ifaces[i] = &pointers[i]
}
if err := s.db.QueryRow("PRAGMA database_list").Scan(ifaces...); err != nil {
return
}
if pointers[1] != nil {
name = *pointers[1]
}
return
}
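// Illustrative, not part of the original file: "PRAGMA database_list" returns
// rows of (seq, name, file); the Scan above keeps only the middle column, so
// the primary database of a connection reports the name "main".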


@ -1,30 +0,0 @@
version: '3'
services:
mysql:
image: 'mysql:latest'
ports:
- 9910:3306
environment:
- MYSQL_DATABASE=gorm
- MYSQL_USER=gorm
- MYSQL_PASSWORD=gorm
- MYSQL_RANDOM_ROOT_PASSWORD="yes"
postgres:
image: 'postgres:latest'
ports:
- 9920:5432
environment:
- POSTGRES_USER=gorm
- POSTGRES_DB=gorm
- POSTGRES_PASSWORD=gorm
mssql:
image: 'mcmoe/mssqldocker:latest'
ports:
- 9930:1433
environment:
- ACCEPT_EULA=Y
- SA_PASSWORD=LoremIpsum86
- MSSQL_DB=gorm
- MSSQL_USER=gorm
- MSSQL_PASSWORD=LoremIpsum86
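# Illustrative, derived from the mysql service above (not part of this file):
# with host port 9910 mapped, a local test could open a connection with
#   db, err := gorm.Open("mysql", "gorm:gorm@tcp(127.0.0.1:9910)/gorm?charset=utf8&parseTime=True")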


@ -1,72 +0,0 @@
package gorm
import (
"errors"
"strings"
)
var (
	// ErrRecordNotFound is returned when no record is found; it occurs only when querying the database with a struct, querying with a slice won't return it
	ErrRecordNotFound = errors.New("record not found")
	// ErrInvalidSQL occurs when you attempt a query with invalid SQL
	ErrInvalidSQL = errors.New("invalid SQL")
	// ErrInvalidTransaction occurs when you `Commit` or `Rollback` without a valid transaction
	ErrInvalidTransaction = errors.New("no valid transaction")
	// ErrCantStartTransaction is returned when a transaction can't be started with `Begin`
	ErrCantStartTransaction = errors.New("can't start transaction")
	// ErrUnaddressable is returned when using an unaddressable value
	ErrUnaddressable = errors.New("using unaddressable value")
)
// Errors contains all errors that have occurred
type Errors []error
// IsRecordNotFoundError returns true if error contains a RecordNotFound error
func IsRecordNotFoundError(err error) bool {
if errs, ok := err.(Errors); ok {
for _, err := range errs {
if err == ErrRecordNotFound {
return true
}
}
}
return err == ErrRecordNotFound
}
// GetErrors gets all errors that have occurred and returns a slice of errors (Error type)
func (errs Errors) GetErrors() []error {
return errs
}
// Add adds an error to a given slice of errors
func (errs Errors) Add(newErrors ...error) Errors {
for _, err := range newErrors {
if err == nil {
continue
}
if errors, ok := err.(Errors); ok {
errs = errs.Add(errors...)
} else {
ok = true
for _, e := range errs {
if err == e {
ok = false
}
}
if ok {
errs = append(errs, err)
}
}
}
return errs
}
// Error takes a slice of all errors that have occurred and returns it as a formatted string
func (errs Errors) Error() string {
var errors = []string{}
for _, e := range errs {
errors = append(errors, e.Error())
}
return strings.Join(errors, "; ")
}
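// Illustrative usage, not part of the original file: callers typically branch
// on the sentinel through the helper above rather than comparing directly:
//
//	err := db.Where("name = ?", name).First(&user).Error
//	if IsRecordNotFoundError(err) {
//		// initialize or create the record instead of failing
//	}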


@ -1,66 +0,0 @@
package gorm
import (
"database/sql"
"database/sql/driver"
"errors"
"fmt"
"reflect"
)
// Field model field definition
type Field struct {
*StructField
IsBlank bool
Field reflect.Value
}
// Set sets a value on the field
func (field *Field) Set(value interface{}) (err error) {
if !field.Field.IsValid() {
return errors.New("field value not valid")
}
if !field.Field.CanAddr() {
return ErrUnaddressable
}
reflectValue, ok := value.(reflect.Value)
if !ok {
reflectValue = reflect.ValueOf(value)
}
fieldValue := field.Field
if reflectValue.IsValid() {
if reflectValue.Type().ConvertibleTo(fieldValue.Type()) {
fieldValue.Set(reflectValue.Convert(fieldValue.Type()))
} else {
if fieldValue.Kind() == reflect.Ptr {
if fieldValue.IsNil() {
fieldValue.Set(reflect.New(field.Struct.Type.Elem()))
}
fieldValue = fieldValue.Elem()
}
if reflectValue.Type().ConvertibleTo(fieldValue.Type()) {
fieldValue.Set(reflectValue.Convert(fieldValue.Type()))
} else if scanner, ok := fieldValue.Addr().Interface().(sql.Scanner); ok {
v := reflectValue.Interface()
if valuer, ok := v.(driver.Valuer); ok {
if v, err = valuer.Value(); err == nil {
err = scanner.Scan(v)
}
} else {
err = scanner.Scan(v)
}
} else {
err = fmt.Errorf("could not convert argument of field %s from %s to %s", field.Name, reflectValue.Type(), fieldValue.Type())
}
}
} else {
field.Field.Set(reflect.Zero(field.Field.Type()))
}
field.IsBlank = isBlank(field.Field)
return err
}
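// Illustrative, not part of the original file: Set above accepts plain values,
// reflect.Values, and driver.Valuer/sql.Scanner pairs alike; assigning the
// string "hello" to a *string field first allocates the pointer and then
// stores the converted value into its element.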

15
vendor/github.com/jinzhu/gorm/go.mod generated vendored

@ -1,15 +0,0 @@
module github.com/jinzhu/gorm
go 1.12
require (
github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5
github.com/go-sql-driver/mysql v1.4.1
github.com/jinzhu/inflection v1.0.0
github.com/jinzhu/now v1.0.1
github.com/lib/pq v1.1.1
github.com/mattn/go-sqlite3 v2.0.1+incompatible
golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd // indirect
google.golang.org/appengine v1.4.0 // indirect
)

29
vendor/github.com/jinzhu/gorm/go.sum generated vendored

@ -1,29 +0,0 @@
github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd h1:83Wprp6ROGeiHFAP8WJdI2RoxALQYgdllERc3N5N2DM=
github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.0.1 h1:HjfetcXq097iXP0uoPCdnM4Efp5/9MsM0/M+XOTeR3M=
github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/lib/pq v1.1.1 h1:sJZmqHoEaY7f+NPP8pgLB/WxulyR3fewgCM2qaSlBb4=
github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/mattn/go-sqlite3 v2.0.1+incompatible h1:xQ15muvnzGBHpIpdrNi1DA5x0+TcBZzsIDwmw9uTHzw=
github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c h1:Vj5n4GlwjmQteupaxJ9+0FNOmBrHfq7vN4btdGoDZgI=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd h1:GGJVjV8waZKRHrgwvtH66z9ZGVurTD1MT0n1Bb+q4aM=
golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=


@ -1,24 +0,0 @@
package gorm
import (
"context"
"database/sql"
)
// SQLCommon is the minimal database connection functionality gorm requires. Implemented by *sql.DB.
type SQLCommon interface {
Exec(query string, args ...interface{}) (sql.Result, error)
Prepare(query string) (*sql.Stmt, error)
Query(query string, args ...interface{}) (*sql.Rows, error)
QueryRow(query string, args ...interface{}) *sql.Row
}
type sqlDb interface {
Begin() (*sql.Tx, error)
BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error)
}
type sqlTx interface {
Commit() error
Rollback() error
}
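// Illustrative compile-time checks, not part of the original file: both
// *sql.DB and *sql.Tx satisfy SQLCommon, which is what lets the same callbacks
// run inside and outside transactions.
var (
	_ SQLCommon = (*sql.DB)(nil)
	_ SQLCommon = (*sql.Tx)(nil)
)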


@ -1,211 +0,0 @@
package gorm
import (
"errors"
"fmt"
"reflect"
"strings"
)
// JoinTableHandlerInterface is an interface for how to handle many2many relations
type JoinTableHandlerInterface interface {
// initialize join table handler
Setup(relationship *Relationship, tableName string, source reflect.Type, destination reflect.Type)
// Table return join table's table name
Table(db *DB) string
	// Add creates a relationship in the join table for source and destination
Add(handler JoinTableHandlerInterface, db *DB, source interface{}, destination interface{}) error
	// Delete deletes relationships in the join table for the given sources
Delete(handler JoinTableHandlerInterface, db *DB, sources ...interface{}) error
// JoinWith query with `Join` conditions
JoinWith(handler JoinTableHandlerInterface, db *DB, source interface{}) *DB
// SourceForeignKeys return source foreign keys
SourceForeignKeys() []JoinTableForeignKey
// DestinationForeignKeys return destination foreign keys
DestinationForeignKeys() []JoinTableForeignKey
}
// JoinTableForeignKey join table foreign key struct
type JoinTableForeignKey struct {
DBName string
AssociationDBName string
}
// JoinTableSource is a struct that contains model type and foreign keys
type JoinTableSource struct {
ModelType reflect.Type
ForeignKeys []JoinTableForeignKey
}
// JoinTableHandler default join table handler
type JoinTableHandler struct {
TableName string `sql:"-"`
Source JoinTableSource `sql:"-"`
Destination JoinTableSource `sql:"-"`
}
// SourceForeignKeys return source foreign keys
func (s *JoinTableHandler) SourceForeignKeys() []JoinTableForeignKey {
return s.Source.ForeignKeys
}
// DestinationForeignKeys return destination foreign keys
func (s *JoinTableHandler) DestinationForeignKeys() []JoinTableForeignKey {
return s.Destination.ForeignKeys
}
// Setup initialize a default join table handler
func (s *JoinTableHandler) Setup(relationship *Relationship, tableName string, source reflect.Type, destination reflect.Type) {
s.TableName = tableName
s.Source = JoinTableSource{ModelType: source}
s.Source.ForeignKeys = []JoinTableForeignKey{}
for idx, dbName := range relationship.ForeignFieldNames {
s.Source.ForeignKeys = append(s.Source.ForeignKeys, JoinTableForeignKey{
DBName: relationship.ForeignDBNames[idx],
AssociationDBName: dbName,
})
}
s.Destination = JoinTableSource{ModelType: destination}
s.Destination.ForeignKeys = []JoinTableForeignKey{}
for idx, dbName := range relationship.AssociationForeignFieldNames {
s.Destination.ForeignKeys = append(s.Destination.ForeignKeys, JoinTableForeignKey{
DBName: relationship.AssociationForeignDBNames[idx],
AssociationDBName: dbName,
})
}
}
// Table return join table's table name
func (s JoinTableHandler) Table(db *DB) string {
return DefaultTableNameHandler(db, s.TableName)
}
func (s JoinTableHandler) updateConditionMap(conditionMap map[string]interface{}, db *DB, joinTableSources []JoinTableSource, sources ...interface{}) {
for _, source := range sources {
scope := db.NewScope(source)
modelType := scope.GetModelStruct().ModelType
for _, joinTableSource := range joinTableSources {
if joinTableSource.ModelType == modelType {
for _, foreignKey := range joinTableSource.ForeignKeys {
if field, ok := scope.FieldByName(foreignKey.AssociationDBName); ok {
conditionMap[foreignKey.DBName] = field.Field.Interface()
}
}
break
}
}
}
}
// Add creates a relationship in the join table for source and destination
func (s JoinTableHandler) Add(handler JoinTableHandlerInterface, db *DB, source interface{}, destination interface{}) error {
var (
scope = db.NewScope("")
conditionMap = map[string]interface{}{}
)
// Update condition map for source
s.updateConditionMap(conditionMap, db, []JoinTableSource{s.Source}, source)
// Update condition map for destination
s.updateConditionMap(conditionMap, db, []JoinTableSource{s.Destination}, destination)
var assignColumns, binVars, conditions []string
var values []interface{}
for key, value := range conditionMap {
assignColumns = append(assignColumns, scope.Quote(key))
binVars = append(binVars, `?`)
conditions = append(conditions, fmt.Sprintf("%v = ?", scope.Quote(key)))
values = append(values, value)
}
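	// the condition values are needed twice, once for the SELECT placeholders
	// and once for the WHERE NOT EXISTS conditions below, so append a second
	// copy (the range iterates over the slice's original length)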
for _, value := range values {
values = append(values, value)
}
quotedTable := scope.Quote(handler.Table(db))
sql := fmt.Sprintf(
"INSERT INTO %v (%v) SELECT %v %v WHERE NOT EXISTS (SELECT * FROM %v WHERE %v)",
quotedTable,
strings.Join(assignColumns, ","),
strings.Join(binVars, ","),
scope.Dialect().SelectFromDummyTable(),
quotedTable,
strings.Join(conditions, " AND "),
)
return db.Exec(sql, values...).Error
}
// Delete deletes relationships in the join table for the given sources
func (s JoinTableHandler) Delete(handler JoinTableHandlerInterface, db *DB, sources ...interface{}) error {
var (
scope = db.NewScope(nil)
conditions []string
values []interface{}
conditionMap = map[string]interface{}{}
)
s.updateConditionMap(conditionMap, db, []JoinTableSource{s.Source, s.Destination}, sources...)
for key, value := range conditionMap {
conditions = append(conditions, fmt.Sprintf("%v = ?", scope.Quote(key)))
values = append(values, value)
}
return db.Table(handler.Table(db)).Where(strings.Join(conditions, " AND "), values...).Delete("").Error
}
// JoinWith query with `Join` conditions
func (s JoinTableHandler) JoinWith(handler JoinTableHandlerInterface, db *DB, source interface{}) *DB {
var (
scope = db.NewScope(source)
tableName = handler.Table(db)
quotedTableName = scope.Quote(tableName)
joinConditions []string
values []interface{}
)
if s.Source.ModelType == scope.GetModelStruct().ModelType {
destinationTableName := db.NewScope(reflect.New(s.Destination.ModelType).Interface()).QuotedTableName()
for _, foreignKey := range s.Destination.ForeignKeys {
joinConditions = append(joinConditions, fmt.Sprintf("%v.%v = %v.%v", quotedTableName, scope.Quote(foreignKey.DBName), destinationTableName, scope.Quote(foreignKey.AssociationDBName)))
}
var foreignDBNames []string
var foreignFieldNames []string
for _, foreignKey := range s.Source.ForeignKeys {
foreignDBNames = append(foreignDBNames, foreignKey.DBName)
if field, ok := scope.FieldByName(foreignKey.AssociationDBName); ok {
foreignFieldNames = append(foreignFieldNames, field.Name)
}
}
foreignFieldValues := scope.getColumnAsArray(foreignFieldNames, scope.Value)
var condString string
if len(foreignFieldValues) > 0 {
var quotedForeignDBNames []string
for _, dbName := range foreignDBNames {
quotedForeignDBNames = append(quotedForeignDBNames, tableName+"."+dbName)
}
condString = fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, quotedForeignDBNames), toQueryMarks(foreignFieldValues))
keys := scope.getColumnAsArray(foreignFieldNames, scope.Value)
values = append(values, toQueryValues(keys))
		} else {
			condString = "1 <> 1"
		}
return db.Joins(fmt.Sprintf("INNER JOIN %v ON %v", quotedTableName, strings.Join(joinConditions, " AND "))).
Where(condString, toQueryValues(foreignFieldValues)...)
}
db.Error = errors.New("wrong source type for join table handler")
return db
}
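// Illustrative (hypothetical models, not part of the original file): for a
// many2many relation such as User <-> Language through "user_languages",
// Setup records the user_id/language_id foreign key pairs, Add issues the
// INSERT ... SELECT ... WHERE NOT EXISTS statement above to keep pairs unique,
// and JoinWith builds the INNER JOIN used when querying the association.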


@ -1,141 +0,0 @@
package gorm
import (
"database/sql/driver"
"fmt"
"log"
"os"
"reflect"
"regexp"
"strconv"
"time"
"unicode"
)
var (
defaultLogger = Logger{log.New(os.Stdout, "\r\n", 0)}
sqlRegexp = regexp.MustCompile(`\?`)
numericPlaceHolderRegexp = regexp.MustCompile(`\$\d+`)
)
func isPrintable(s string) bool {
for _, r := range s {
if !unicode.IsPrint(r) {
return false
}
}
return true
}
var LogFormatter = func(values ...interface{}) (messages []interface{}) {
if len(values) > 1 {
var (
sql string
formattedValues []string
level = values[0]
currentTime = "\n\033[33m[" + NowFunc().Format("2006-01-02 15:04:05") + "]\033[0m"
source = fmt.Sprintf("\033[35m(%v)\033[0m", values[1])
)
messages = []interface{}{source, currentTime}
if len(values) == 2 {
//remove the line break
currentTime = currentTime[1:]
//remove the brackets
source = fmt.Sprintf("\033[35m%v\033[0m", values[1])
messages = []interface{}{currentTime, source}
}
if level == "sql" {
// duration
messages = append(messages, fmt.Sprintf(" \033[36;1m[%.2fms]\033[0m ", float64(values[2].(time.Duration).Nanoseconds()/1e4)/100.0))
// sql
for _, value := range values[4].([]interface{}) {
indirectValue := reflect.Indirect(reflect.ValueOf(value))
if indirectValue.IsValid() {
value = indirectValue.Interface()
if t, ok := value.(time.Time); ok {
if t.IsZero() {
formattedValues = append(formattedValues, fmt.Sprintf("'%v'", "0000-00-00 00:00:00"))
} else {
formattedValues = append(formattedValues, fmt.Sprintf("'%v'", t.Format("2006-01-02 15:04:05")))
}
} else if b, ok := value.([]byte); ok {
if str := string(b); isPrintable(str) {
formattedValues = append(formattedValues, fmt.Sprintf("'%v'", str))
} else {
formattedValues = append(formattedValues, "'<binary>'")
}
} else if r, ok := value.(driver.Valuer); ok {
if value, err := r.Value(); err == nil && value != nil {
formattedValues = append(formattedValues, fmt.Sprintf("'%v'", value))
} else {
formattedValues = append(formattedValues, "NULL")
}
} else {
switch value.(type) {
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool:
formattedValues = append(formattedValues, fmt.Sprintf("%v", value))
default:
formattedValues = append(formattedValues, fmt.Sprintf("'%v'", value))
}
}
} else {
formattedValues = append(formattedValues, "NULL")
}
}
// differentiate between $n placeholders or else treat like ?
if numericPlaceHolderRegexp.MatchString(values[3].(string)) {
sql = values[3].(string)
for index, value := range formattedValues {
placeholder := fmt.Sprintf(`\$%d([^\d]|$)`, index+1)
sql = regexp.MustCompile(placeholder).ReplaceAllString(sql, value+"$1")
}
} else {
formattedValuesLength := len(formattedValues)
for index, value := range sqlRegexp.Split(values[3].(string), -1) {
sql += value
if index < formattedValuesLength {
sql += formattedValues[index]
}
}
}
messages = append(messages, sql)
messages = append(messages, fmt.Sprintf(" \n\033[36;31m[%v]\033[0m ", strconv.FormatInt(values[5].(int64), 10)+" rows affected or returned "))
} else {
messages = append(messages, "\033[31;1m")
messages = append(messages, values[2:]...)
messages = append(messages, "\033[0m")
}
}
return
}
type logger interface {
Print(v ...interface{})
}
// LogWriter log writer interface
type LogWriter interface {
Println(v ...interface{})
}
// Logger default logger
type Logger struct {
LogWriter
}
// Print format & print log
func (logger Logger) Print(values ...interface{}) {
logger.Println(LogFormatter(values...)...)
}
type nopLogger struct{}
func (nopLogger) Print(values ...interface{}) {}
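// Illustrative wiring, not part of the original file: the default logger above
// writes to stdout, but any LogWriter can be swapped in:
//
//	db.SetLogger(Logger{LogWriter: log.New(os.Stderr, "gorm: ", 0)})
//	db.LogMode(true) // detailed mode: log every SQL statement with duration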

881
vendor/github.com/jinzhu/gorm/main.go generated vendored

@ -1,881 +0,0 @@
package gorm
import (
"context"
"database/sql"
"errors"
"fmt"
"reflect"
"strings"
"sync"
"time"
)
// DB contains information for current db connection
type DB struct {
sync.RWMutex
Value interface{}
Error error
RowsAffected int64
// single db
db SQLCommon
blockGlobalUpdate bool
logMode logModeValue
logger logger
search *search
values sync.Map
// global db
parent *DB
callbacks *Callback
dialect Dialect
singularTable bool
	// function used to override the creation of new timestamps
nowFuncOverride func() time.Time
}
type logModeValue int
const (
defaultLogMode logModeValue = iota
noLogMode
detailedLogMode
)
// Open initializes a new db connection; the driver must be imported first, e.g.:
//
// import _ "github.com/go-sql-driver/mysql"
// func main() {
// db, err := gorm.Open("mysql", "user:password@/dbname?charset=utf8&parseTime=True&loc=Local")
// }
// GORM wraps some drivers to make their import paths easier to remember, so you could import the mysql driver with
// import _ "github.com/jinzhu/gorm/dialects/mysql"
// // import _ "github.com/jinzhu/gorm/dialects/postgres"
// // import _ "github.com/jinzhu/gorm/dialects/sqlite"
// // import _ "github.com/jinzhu/gorm/dialects/mssql"
func Open(dialect string, args ...interface{}) (db *DB, err error) {
if len(args) == 0 {
err = errors.New("invalid database source")
return nil, err
}
var source string
var dbSQL SQLCommon
var ownDbSQL bool
switch value := args[0].(type) {
case string:
var driver = dialect
if len(args) == 1 {
source = value
} else if len(args) >= 2 {
driver = value
source = args[1].(string)
}
dbSQL, err = sql.Open(driver, source)
ownDbSQL = true
case SQLCommon:
dbSQL = value
ownDbSQL = false
default:
return nil, fmt.Errorf("invalid database source: %v is not a valid type", value)
}
db = &DB{
db: dbSQL,
logger: defaultLogger,
callbacks: DefaultCallback,
dialect: newDialect(dialect, dbSQL),
}
db.parent = db
if err != nil {
return
}
// Send a ping to make sure the database connection is alive.
if d, ok := dbSQL.(*sql.DB); ok {
if err = d.Ping(); err != nil && ownDbSQL {
d.Close()
}
}
return
}
// New clone a new db connection without search conditions
func (s *DB) New() *DB {
clone := s.clone()
clone.search = nil
clone.Value = nil
return clone
}
type closer interface {
Close() error
}
// Close close current db connection. If database connection is not an io.Closer, returns an error.
func (s *DB) Close() error {
if db, ok := s.parent.db.(closer); ok {
return db.Close()
}
return errors.New("can't close current db")
}
// DB get `*sql.DB` from current connection
// It panics if the underlying database connection is not a *sql.DB (for example, inside a transaction)
func (s *DB) DB() *sql.DB {
	db, ok := s.db.(*sql.DB)
	if !ok {
		panic("can't support full GORM on the current connection, maybe this is a TX instance.")
	}
	return db
}
// CommonDB return the underlying `*sql.DB` or `*sql.Tx` instance, mainly intended to allow coexistence with legacy non-GORM code.
func (s *DB) CommonDB() SQLCommon {
return s.db
}
// Dialect get dialect
func (s *DB) Dialect() Dialect {
return s.dialect
}
// Callback return `Callbacks` container, you could add/change/delete callbacks with it
// db.Callback().Create().Register("update_created_at", updateCreated)
// Refer https://jinzhu.github.io/gorm/development.html#callbacks
func (s *DB) Callback() *Callback {
s.parent.callbacks = s.parent.callbacks.clone(s.logger)
return s.parent.callbacks
}
// SetLogger replace default logger
func (s *DB) SetLogger(log logger) {
s.logger = log
}
// LogMode sets the log mode: `true` for detailed logs, `false` for no logs; by default only error logs are printed
func (s *DB) LogMode(enable bool) *DB {
if enable {
s.logMode = detailedLogMode
} else {
s.logMode = noLogMode
}
return s
}
// SetNowFuncOverride set the function to be used when creating a new timestamp
func (s *DB) SetNowFuncOverride(nowFuncOverride func() time.Time) *DB {
s.nowFuncOverride = nowFuncOverride
return s
}
// Get a new timestamp, using the provided nowFuncOverride on the DB instance if set,
// otherwise defaults to the global NowFunc()
func (s *DB) nowFunc() time.Time {
if s.nowFuncOverride != nil {
return s.nowFuncOverride()
}
return NowFunc()
}
// BlockGlobalUpdate if true, generates an error on update/delete without a where clause.
// This prevents accidental global updates or deletions from empty condition objects
func (s *DB) BlockGlobalUpdate(enable bool) *DB {
s.blockGlobalUpdate = enable
return s
}
// HasBlockGlobalUpdate returns whether BlockGlobalUpdate is enabled
func (s *DB) HasBlockGlobalUpdate() bool {
return s.blockGlobalUpdate
}
// SingularTable use singular table by default
func (s *DB) SingularTable(enable bool) {
s.parent.Lock()
defer s.parent.Unlock()
s.parent.singularTable = enable
}
// NewScope create a scope for current operation
func (s *DB) NewScope(value interface{}) *Scope {
dbClone := s.clone()
dbClone.Value = value
scope := &Scope{db: dbClone, Value: value}
if s.search != nil {
scope.Search = s.search.clone()
} else {
scope.Search = &search{}
}
return scope
}
// QueryExpr returns the query as SqlExpr object
func (s *DB) QueryExpr() *SqlExpr {
scope := s.NewScope(s.Value)
scope.InstanceSet("skip_bindvar", true)
scope.prepareQuerySQL()
return Expr(scope.SQL, scope.SQLVars...)
}
// SubQuery returns the query as sub query
func (s *DB) SubQuery() *SqlExpr {
scope := s.NewScope(s.Value)
scope.InstanceSet("skip_bindvar", true)
scope.prepareQuerySQL()
return Expr(fmt.Sprintf("(%v)", scope.SQL), scope.SQLVars...)
}
// Where return a new relation, filter records with given conditions, accepts `map`, `struct` or `string` as conditions, refer http://jinzhu.github.io/gorm/crud.html#query
func (s *DB) Where(query interface{}, args ...interface{}) *DB {
return s.clone().search.Where(query, args...).db
}
// Or filter records that match before conditions or this one, similar to `Where`
func (s *DB) Or(query interface{}, args ...interface{}) *DB {
return s.clone().search.Or(query, args...).db
}
// Not filter records that don't match current conditions, similar to `Where`
func (s *DB) Not(query interface{}, args ...interface{}) *DB {
return s.clone().search.Not(query, args...).db
}
// Limit specify the number of records to be retrieved
func (s *DB) Limit(limit interface{}) *DB {
return s.clone().search.Limit(limit).db
}
// Offset specify the number of records to skip before starting to return the records
func (s *DB) Offset(offset interface{}) *DB {
return s.clone().search.Offset(offset).db
}
// Order specify order when retrieve records from database, set reorder to `true` to overwrite defined conditions
// db.Order("name DESC")
// db.Order("name DESC", true) // reorder
// db.Order(gorm.Expr("name = ? DESC", "first")) // sql expression
func (s *DB) Order(value interface{}, reorder ...bool) *DB {
return s.clone().search.Order(value, reorder...).db
}
// Select specify fields that you want to retrieve from database when querying, by default, will select all fields;
// When creating/updating, specify fields that you want to save to database
func (s *DB) Select(query interface{}, args ...interface{}) *DB {
return s.clone().search.Select(query, args...).db
}
// Omit specify fields that you want to ignore when saving to database for creating, updating
func (s *DB) Omit(columns ...string) *DB {
return s.clone().search.Omit(columns...).db
}
// Group specify the group method on the find
func (s *DB) Group(query string) *DB {
return s.clone().search.Group(query).db
}
// Having specify HAVING conditions for GROUP BY
func (s *DB) Having(query interface{}, values ...interface{}) *DB {
return s.clone().search.Having(query, values...).db
}
// Joins specify Joins conditions
// db.Joins("JOIN emails ON emails.user_id = users.id AND emails.email = ?", "jinzhu@example.org").Find(&user)
func (s *DB) Joins(query string, args ...interface{}) *DB {
return s.clone().search.Joins(query, args...).db
}
// Scopes pass current database connection to arguments `func(*DB) *DB`, which could be used to add conditions dynamically
// func AmountGreaterThan1000(db *gorm.DB) *gorm.DB {
// return db.Where("amount > ?", 1000)
// }
//
// func OrderStatus(status []string) func (db *gorm.DB) *gorm.DB {
// return func (db *gorm.DB) *gorm.DB {
// return db.Scopes(AmountGreaterThan1000).Where("status in (?)", status)
// }
// }
//
// db.Scopes(AmountGreaterThan1000, OrderStatus([]string{"paid", "shipped"})).Find(&orders)
// Refer https://jinzhu.github.io/gorm/crud.html#scopes
func (s *DB) Scopes(funcs ...func(*DB) *DB) *DB {
for _, f := range funcs {
s = f(s)
}
return s
}
// Unscoped returns all records including deleted ones, refer Soft Delete https://jinzhu.github.io/gorm/crud.html#soft-delete
func (s *DB) Unscoped() *DB {
return s.clone().search.unscoped().db
}
// Attrs initializes the struct with the argument if the record is not found, with `FirstOrInit` https://jinzhu.github.io/gorm/crud.html#firstorinit or `FirstOrCreate` https://jinzhu.github.io/gorm/crud.html#firstorcreate
func (s *DB) Attrs(attrs ...interface{}) *DB {
return s.clone().search.Attrs(attrs...).db
}
// Assign assigns the argument to the result regardless of whether it is found, with `FirstOrInit` https://jinzhu.github.io/gorm/crud.html#firstorinit or `FirstOrCreate` https://jinzhu.github.io/gorm/crud.html#firstorcreate
func (s *DB) Assign(attrs ...interface{}) *DB {
return s.clone().search.Assign(attrs...).db
}
// First finds the first record matching given conditions, ordered by primary key
func (s *DB) First(out interface{}, where ...interface{}) *DB {
newScope := s.NewScope(out)
newScope.Search.Limit(1)
return newScope.Set("gorm:order_by_primary_key", "ASC").
inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db
}
// Take returns a record matching given conditions; the order depends on the database implementation
func (s *DB) Take(out interface{}, where ...interface{}) *DB {
newScope := s.NewScope(out)
newScope.Search.Limit(1)
return newScope.inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db
}
// Last finds the last record matching given conditions, ordered by primary key
func (s *DB) Last(out interface{}, where ...interface{}) *DB {
newScope := s.NewScope(out)
newScope.Search.Limit(1)
return newScope.Set("gorm:order_by_primary_key", "DESC").
inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db
}
// Find finds records that match given conditions
func (s *DB) Find(out interface{}, where ...interface{}) *DB {
return s.NewScope(out).inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db
}
// Preloads preloads relations; it doesn't modify out
func (s *DB) Preloads(out interface{}) *DB {
return s.NewScope(out).InstanceSet("gorm:only_preload", 1).callCallbacks(s.parent.callbacks.queries).db
}
// Scan scans value into a struct
func (s *DB) Scan(dest interface{}) *DB {
return s.NewScope(s.Value).Set("gorm:query_destination", dest).callCallbacks(s.parent.callbacks.queries).db
}
// Row return `*sql.Row` with given conditions
func (s *DB) Row() *sql.Row {
return s.NewScope(s.Value).row()
}
// Rows return `*sql.Rows` with given conditions
func (s *DB) Rows() (*sql.Rows, error) {
return s.NewScope(s.Value).rows()
}
// ScanRows scans `*sql.Rows` into the given struct
func (s *DB) ScanRows(rows *sql.Rows, result interface{}) error {
var (
scope = s.NewScope(result)
clone = scope.db
columns, err = rows.Columns()
)
if clone.AddError(err) == nil {
scope.scan(rows, columns, scope.Fields())
}
return clone.Error
}
// Pluck queries a single column from a model into a slice
// var ages []int64
// db.Find(&users).Pluck("age", &ages)
func (s *DB) Pluck(column string, value interface{}) *DB {
return s.NewScope(s.Value).pluck(column, value).db
}
// Count get how many records for a model
func (s *DB) Count(value interface{}) *DB {
return s.NewScope(s.Value).count(value).db
}
// Related get related associations
func (s *DB) Related(value interface{}, foreignKeys ...string) *DB {
return s.NewScope(s.Value).related(value, foreignKeys...).db
}
// FirstOrInit finds the first matched record or initializes a new one with given conditions (only works with struct or map conditions)
// https://jinzhu.github.io/gorm/crud.html#firstorinit
func (s *DB) FirstOrInit(out interface{}, where ...interface{}) *DB {
c := s.clone()
if result := c.First(out, where...); result.Error != nil {
if !result.RecordNotFound() {
return result
}
c.NewScope(out).inlineCondition(where...).initialize()
} else {
c.NewScope(out).updatedAttrsWithValues(c.search.assignAttrs)
}
return c
}
// FirstOrCreate finds the first matched record or creates a new one with given conditions (only works with struct or map conditions)
// https://jinzhu.github.io/gorm/crud.html#firstorcreate
func (s *DB) FirstOrCreate(out interface{}, where ...interface{}) *DB {
c := s.clone()
if result := s.First(out, where...); result.Error != nil {
if !result.RecordNotFound() {
return result
}
return c.NewScope(out).inlineCondition(where...).initialize().callCallbacks(c.parent.callbacks.creates).db
} else if len(c.search.assignAttrs) > 0 {
return c.NewScope(out).InstanceSet("gorm:update_interface", c.search.assignAttrs).callCallbacks(c.parent.callbacks.updates).db
}
return c
}
// Update update attributes with callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update
// WARNING: when updating with a struct, GORM will not update fields that have a zero value
func (s *DB) Update(attrs ...interface{}) *DB {
return s.Updates(toSearchableMap(attrs...), true)
}
// Updates update attributes with callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update
func (s *DB) Updates(values interface{}, ignoreProtectedAttrs ...bool) *DB {
return s.NewScope(s.Value).
Set("gorm:ignore_protected_attrs", len(ignoreProtectedAttrs) > 0).
InstanceSet("gorm:update_interface", values).
callCallbacks(s.parent.callbacks.updates).db
}
// UpdateColumn update attributes without callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update
func (s *DB) UpdateColumn(attrs ...interface{}) *DB {
return s.UpdateColumns(toSearchableMap(attrs...))
}
// UpdateColumns update attributes without callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update
func (s *DB) UpdateColumns(values interface{}) *DB {
return s.NewScope(s.Value).
Set("gorm:update_column", true).
Set("gorm:save_associations", false).
InstanceSet("gorm:update_interface", values).
callCallbacks(s.parent.callbacks.updates).db
}
// Save updates the value in the database; if the value doesn't have a primary key, it will be inserted
func (s *DB) Save(value interface{}) *DB {
scope := s.NewScope(value)
if !scope.PrimaryKeyZero() {
newDB := scope.callCallbacks(s.parent.callbacks.updates).db
if newDB.Error == nil && newDB.RowsAffected == 0 {
return s.New().Table(scope.TableName()).FirstOrCreate(value)
}
return newDB
}
return scope.callCallbacks(s.parent.callbacks.creates).db
}
// Create inserts the value into the database
func (s *DB) Create(value interface{}) *DB {
scope := s.NewScope(value)
return scope.callCallbacks(s.parent.callbacks.creates).db
}
// Delete deletes values matching given conditions; if the value has a primary key, it will be included as a condition
// WARNING: if the model has a DeletedAt field, GORM will only set the DeletedAt field's value to the current time
func (s *DB) Delete(value interface{}, where ...interface{}) *DB {
return s.NewScope(value).inlineCondition(where...).callCallbacks(s.parent.callbacks.deletes).db
}
// Raw uses raw sql as conditions; it won't run until invoked by other methods
// db.Raw("SELECT name, age FROM users WHERE name = ?", 3).Scan(&result)
func (s *DB) Raw(sql string, values ...interface{}) *DB {
return s.clone().search.Raw(true).Where(sql, values...).db
}
// Exec execute raw sql
func (s *DB) Exec(sql string, values ...interface{}) *DB {
scope := s.NewScope(nil)
generatedSQL := scope.buildCondition(map[string]interface{}{"query": sql, "args": values}, true)
generatedSQL = strings.TrimSuffix(strings.TrimPrefix(generatedSQL, "("), ")")
scope.Raw(generatedSQL)
return scope.Exec().db
}
// Model specifies the model you would like to run db operations on
// // update all users' names to `hello`
// db.Model(&User{}).Update("name", "hello")
// // if user's primary key is non-blank, it will be used as the condition, so only that user's name is updated to `hello`
// db.Model(&user).Update("name", "hello")
func (s *DB) Model(value interface{}) *DB {
c := s.clone()
c.Value = value
return c
}
// Table specify the table you would like to run db operations
func (s *DB) Table(name string) *DB {
clone := s.clone()
clone.search.Table(name)
clone.Value = nil
return clone
}
// Debug start debug mode
func (s *DB) Debug() *DB {
return s.clone().LogMode(true)
}
// Transaction starts a transaction as a block;
// if the block returns an error the transaction is rolled back, otherwise it is committed.
func (s *DB) Transaction(fc func(tx *DB) error) (err error) {
panicked := true
tx := s.Begin()
defer func() {
// Make sure to roll back on panic, block error, or commit error
if panicked || err != nil {
tx.Rollback()
}
}()
err = fc(tx)
if err == nil {
err = tx.Commit().Error
}
panicked = false
return
}
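// A minimal usage sketch (illustrative, assuming a User model; `gorm.DB` is the
// caller-side name of this type):
//
//	err := db.Transaction(func(tx *gorm.DB) error {
//		if err := tx.Create(&User{Name: "alice"}).Error; err != nil {
//			return err // returning an error rolls the transaction back
//		}
//		return nil // returning nil commits
//	})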
// Begin begins a transaction
func (s *DB) Begin() *DB {
return s.BeginTx(context.Background(), &sql.TxOptions{})
}
// BeginTx begins a transaction with options
func (s *DB) BeginTx(ctx context.Context, opts *sql.TxOptions) *DB {
c := s.clone()
if db, ok := c.db.(sqlDb); ok && db != nil {
tx, err := db.BeginTx(ctx, opts)
c.db = interface{}(tx).(SQLCommon)
c.dialect.SetDB(c.db)
c.AddError(err)
} else {
c.AddError(ErrCantStartTransaction)
}
return c
}
// Commit commits a transaction
func (s *DB) Commit() *DB {
var emptySQLTx *sql.Tx
if db, ok := s.db.(sqlTx); ok && db != nil && db != emptySQLTx {
s.AddError(db.Commit())
} else {
s.AddError(ErrInvalidTransaction)
}
return s
}
// Rollback rolls back a transaction
func (s *DB) Rollback() *DB {
var emptySQLTx *sql.Tx
if db, ok := s.db.(sqlTx); ok && db != nil && db != emptySQLTx {
if err := db.Rollback(); err != nil && err != sql.ErrTxDone {
s.AddError(err)
}
} else {
s.AddError(ErrInvalidTransaction)
}
return s
}
// RollbackUnlessCommitted rolls back a transaction if it has not yet been
// committed.
func (s *DB) RollbackUnlessCommitted() *DB {
var emptySQLTx *sql.Tx
if db, ok := s.db.(sqlTx); ok && db != nil && db != emptySQLTx {
err := db.Rollback()
// Ignore the error indicating that the transaction has already
// been committed.
if err != sql.ErrTxDone {
s.AddError(err)
}
} else {
s.AddError(ErrInvalidTransaction)
}
return s
}
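// A minimal manual-transaction sketch (illustrative): RollbackUnlessCommitted is
// safe to defer because the sql.ErrTxDone of an already-committed tx is ignored.
//
//	tx := db.Begin()
//	defer tx.RollbackUnlessCommitted()
//	if err := tx.Create(&user).Error; err != nil {
//		return err
//	}
//	return tx.Commit().Error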
// NewRecord checks whether the value's primary key is blank
func (s *DB) NewRecord(value interface{}) bool {
return s.NewScope(value).PrimaryKeyZero()
}
// RecordNotFound checks whether an ErrRecordNotFound error was returned
func (s *DB) RecordNotFound() bool {
for _, err := range s.GetErrors() {
if err == ErrRecordNotFound {
return true
}
}
return false
}
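// A minimal usage sketch (illustrative, assuming a User model):
//
//	if db.Where("name = ?", "missing").First(&user).RecordNotFound() {
//		// no matching row: create it, return a 404, etc.
//	}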
// CreateTable creates tables for the given models
func (s *DB) CreateTable(models ...interface{}) *DB {
db := s.Unscoped()
for _, model := range models {
db = db.NewScope(model).createTable().db
}
return db
}
// DropTable drops tables for the given models or table names
func (s *DB) DropTable(values ...interface{}) *DB {
db := s.clone()
for _, value := range values {
if tableName, ok := value.(string); ok {
db = db.Table(tableName)
}
db = db.NewScope(value).dropTable().db
}
return db
}
// DropTableIfExists drops the table if it exists
func (s *DB) DropTableIfExists(values ...interface{}) *DB {
db := s.clone()
for _, value := range values {
if s.HasTable(value) {
db.AddError(s.DropTable(value).Error)
}
}
return db
}
// HasTable checks whether the table exists
func (s *DB) HasTable(value interface{}) bool {
var (
scope = s.NewScope(value)
tableName string
)
if name, ok := value.(string); ok {
tableName = name
} else {
tableName = scope.TableName()
}
has := scope.Dialect().HasTable(tableName)
s.AddError(scope.db.Error)
return has
}
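// A minimal usage sketch (illustrative): both forms are equivalent for a User
// model mapped to the "users" table.
//
//	db.HasTable(&User{})
//	db.HasTable("users")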
// AutoMigrate runs auto migration for the given models; it will only add missing fields and won't delete or change existing data
func (s *DB) AutoMigrate(values ...interface{}) *DB {
db := s.Unscoped()
for _, value := range values {
db = db.NewScope(value).autoMigrate().db
}
return db
}
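// A minimal usage sketch (illustrative; User and Order are assumed models, and
// "gorm:table_options" is a setting consumed while creating tables):
//
//	db.AutoMigrate(&User{}, &Order{})
//	db.Set("gorm:table_options", "ENGINE=InnoDB").AutoMigrate(&User{})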
// ModifyColumn modifies a column to the given type
func (s *DB) ModifyColumn(column string, typ string) *DB {
scope := s.NewScope(s.Value)
scope.modifyColumn(column, typ)
return scope.db
}
// DropColumn drops a column
func (s *DB) DropColumn(column string) *DB {
scope := s.NewScope(s.Value)
scope.dropColumn(column)
return scope.db
}
// AddIndex adds an index on the given columns with the given name
func (s *DB) AddIndex(indexName string, columns ...string) *DB {
scope := s.Unscoped().NewScope(s.Value)
scope.addIndex(false, indexName, columns...)
return scope.db
}
// AddUniqueIndex adds a unique index on the given columns with the given name
func (s *DB) AddUniqueIndex(indexName string, columns ...string) *DB {
scope := s.Unscoped().NewScope(s.Value)
scope.addIndex(true, indexName, columns...)
return scope.db
}
// RemoveIndex removes the index with the given name
func (s *DB) RemoveIndex(indexName string) *DB {
scope := s.NewScope(s.Value)
scope.removeIndex(indexName)
return scope.db
}
// AddForeignKey adds a foreign key to the given scope, e.g.:
// db.Model(&User{}).AddForeignKey("city_id", "cities(id)", "RESTRICT", "RESTRICT")
func (s *DB) AddForeignKey(field string, dest string, onDelete string, onUpdate string) *DB {
scope := s.NewScope(s.Value)
scope.addForeignKey(field, dest, onDelete, onUpdate)
return scope.db
}
// RemoveForeignKey removes a foreign key from the given scope, e.g.:
// db.Model(&User{}).RemoveForeignKey("city_id", "cities(id)")
func (s *DB) RemoveForeignKey(field string, dest string) *DB {
scope := s.clone().NewScope(s.Value)
scope.removeForeignKey(field, dest)
return scope.db
}
// Association starts `Association Mode`, which makes handling relations easier; refer: https://jinzhu.github.io/gorm/associations.html#association-mode
func (s *DB) Association(column string) *Association {
var err error
var scope = s.Set("gorm:association:source", s.Value).NewScope(s.Value)
if primaryField := scope.PrimaryField(); primaryField.IsBlank {
err = errors.New("primary key can't be nil")
} else {
if field, ok := scope.FieldByName(column); ok {
if field.Relationship == nil || len(field.Relationship.ForeignFieldNames) == 0 {
err = fmt.Errorf("invalid association %v for %v", column, scope.IndirectValue().Type())
} else {
return &Association{scope: scope, column: column, field: field}
}
} else {
err = fmt.Errorf("%v doesn't have column %v", scope.IndirectValue().Type(), column)
}
}
return &Association{Error: err}
}
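// A minimal usage sketch (illustrative, assuming a User model with a Languages
// association):
//
//	db.Model(&user).Association("Languages").Append(&languageEN)
//	count := db.Model(&user).Association("Languages").Count()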
// Preload preloads associations with the given conditions
// db.Preload("Orders", "state NOT IN (?)", "cancelled").Find(&users)
func (s *DB) Preload(column string, conditions ...interface{}) *DB {
return s.clone().search.Preload(column, conditions...).db
}
// Set sets a setting by name, which can be used in callbacks; it clones a new db and updates the clone's settings
func (s *DB) Set(name string, value interface{}) *DB {
return s.clone().InstantSet(name, value)
}
// InstantSet sets a setting immediately, affecting the current db
func (s *DB) InstantSet(name string, value interface{}) *DB {
s.values.Store(name, value)
return s
}
// Get gets a setting by name
func (s *DB) Get(name string) (value interface{}, ok bool) {
value, ok = s.values.Load(name)
return
}
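// A minimal usage sketch (illustrative; "gorm:insert_option" is a setting read
// by the create callbacks):
//
//	tx := db.Set("gorm:insert_option", "ON CONFLICT DO NOTHING")
//	if opt, ok := tx.Get("gorm:insert_option"); ok {
//		fmt.Println(opt) // "ON CONFLICT DO NOTHING"
//	}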
// SetJoinTableHandler sets a model's join table handler for a relation
func (s *DB) SetJoinTableHandler(source interface{}, column string, handler JoinTableHandlerInterface) {
scope := s.NewScope(source)
for _, field := range scope.GetModelStruct().StructFields {
if field.Name == column || field.DBName == column {
if many2many, _ := field.TagSettingsGet("MANY2MANY"); many2many != "" {
source := (&Scope{Value: source}).GetModelStruct().ModelType
destination := (&Scope{Value: reflect.New(field.Struct.Type).Interface()}).GetModelStruct().ModelType
handler.Setup(field.Relationship, many2many, source, destination)
field.Relationship.JoinTableHandler = handler
if table := handler.Table(s); scope.Dialect().HasTable(table) {
s.Table(table).AutoMigrate(handler)
}
}
}
}
}
// AddError adds an error to the db
func (s *DB) AddError(err error) error {
if err != nil {
if err != ErrRecordNotFound {
if s.logMode == defaultLogMode {
go s.print("error", fileWithLineNum(), err)
} else {
s.log(err)
}
errors := Errors(s.GetErrors())
errors = errors.Add(err)
if len(errors) > 1 {
err = errors
}
}
s.Error = err
}
return err
}
// GetErrors returns the errors that have occurred on the db
func (s *DB) GetErrors() []error {
if errs, ok := s.Error.(Errors); ok {
return errs
} else if s.Error != nil {
return []error{s.Error}
}
return []error{}
}
////////////////////////////////////////////////////////////////////////////////
// Private Methods For DB
////////////////////////////////////////////////////////////////////////////////
func (s *DB) clone() *DB {
db := &DB{
db: s.db,
parent: s.parent,
logger: s.logger,
logMode: s.logMode,
Value: s.Value,
Error: s.Error,
blockGlobalUpdate: s.blockGlobalUpdate,
dialect: newDialect(s.dialect.GetName(), s.db),
nowFuncOverride: s.nowFuncOverride,
}
s.values.Range(func(k, v interface{}) bool {
db.values.Store(k, v)
return true
})
if s.search == nil {
db.search = &search{limit: -1, offset: -1}
} else {
db.search = s.search.clone()
}
db.search.db = db
return db
}
func (s *DB) print(v ...interface{}) {
s.logger.Print(v...)
}
func (s *DB) log(v ...interface{}) {
if s != nil && s.logMode == detailedLogMode {
s.print(append([]interface{}{"log", fileWithLineNum()}, v...)...)
}
}
func (s *DB) slog(sql string, t time.Time, vars ...interface{}) {
if s.logMode == detailedLogMode {
s.print("sql", fileWithLineNum(), NowFunc().Sub(t), sql, vars, s.RowsAffected)
}
}

View File

@ -1,14 +0,0 @@
package gorm
import "time"
// Model base model definition, including fields `ID`, `CreatedAt`, `UpdatedAt`, `DeletedAt`, which could be embedded in your models
// type User struct {
// gorm.Model
// }
type Model struct {
ID uint `gorm:"primary_key"`
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt *time.Time `sql:"index"`
}

View File

@ -1,671 +0,0 @@
package gorm
import (
"database/sql"
"errors"
"go/ast"
"reflect"
"strings"
"sync"
"time"
"github.com/jinzhu/inflection"
)
// DefaultTableNameHandler default table name handler
var DefaultTableNameHandler = func(db *DB, defaultTableName string) string {
return defaultTableName
}
// lock for mutating global cached model metadata
var structsLock sync.Mutex
// global cache of model metadata
var modelStructsMap sync.Map
// ModelStruct model definition
type ModelStruct struct {
PrimaryFields []*StructField
StructFields []*StructField
ModelType reflect.Type
defaultTableName string
l sync.Mutex
}
// TableName returns model's table name
func (s *ModelStruct) TableName(db *DB) string {
s.l.Lock()
defer s.l.Unlock()
if s.defaultTableName == "" && db != nil && s.ModelType != nil {
// Set default table name
if tabler, ok := reflect.New(s.ModelType).Interface().(tabler); ok {
s.defaultTableName = tabler.TableName()
} else {
tableName := ToTableName(s.ModelType.Name())
db.parent.RLock()
if db == nil || (db.parent != nil && !db.parent.singularTable) {
tableName = inflection.Plural(tableName)
}
db.parent.RUnlock()
s.defaultTableName = tableName
}
}
return DefaultTableNameHandler(db, s.defaultTableName)
}
// StructField model field's struct definition
type StructField struct {
DBName string
Name string
Names []string
IsPrimaryKey bool
IsNormal bool
IsIgnored bool
IsScanner bool
HasDefaultValue bool
Tag reflect.StructTag
TagSettings map[string]string
Struct reflect.StructField
IsForeignKey bool
Relationship *Relationship
tagSettingsLock sync.RWMutex
}
// TagSettingsSet sets a tag in the tag settings map
func (sf *StructField) TagSettingsSet(key, val string) {
sf.tagSettingsLock.Lock()
defer sf.tagSettingsLock.Unlock()
sf.TagSettings[key] = val
}
// TagSettingsGet returns a tag from the tag settings
func (sf *StructField) TagSettingsGet(key string) (string, bool) {
sf.tagSettingsLock.RLock()
defer sf.tagSettingsLock.RUnlock()
val, ok := sf.TagSettings[key]
return val, ok
}
// TagSettingsDelete deletes a tag
func (sf *StructField) TagSettingsDelete(key string) {
sf.tagSettingsLock.Lock()
defer sf.tagSettingsLock.Unlock()
delete(sf.TagSettings, key)
}
func (sf *StructField) clone() *StructField {
clone := &StructField{
DBName: sf.DBName,
Name: sf.Name,
Names: sf.Names,
IsPrimaryKey: sf.IsPrimaryKey,
IsNormal: sf.IsNormal,
IsIgnored: sf.IsIgnored,
IsScanner: sf.IsScanner,
HasDefaultValue: sf.HasDefaultValue,
Tag: sf.Tag,
TagSettings: map[string]string{},
Struct: sf.Struct,
IsForeignKey: sf.IsForeignKey,
}
if sf.Relationship != nil {
relationship := *sf.Relationship
clone.Relationship = &relationship
}
// copy the struct field tagSettings, they should be read-locked while they are copied
sf.tagSettingsLock.Lock()
defer sf.tagSettingsLock.Unlock()
for key, value := range sf.TagSettings {
clone.TagSettings[key] = value
}
return clone
}
// Relationship describes the relationship between models
type Relationship struct {
Kind string
PolymorphicType string
PolymorphicDBName string
PolymorphicValue string
ForeignFieldNames []string
ForeignDBNames []string
AssociationForeignFieldNames []string
AssociationForeignDBNames []string
JoinTableHandler JoinTableHandlerInterface
}
func getForeignField(column string, fields []*StructField) *StructField {
for _, field := range fields {
if field.Name == column || field.DBName == column || field.DBName == ToColumnName(column) {
return field
}
}
return nil
}
// GetModelStruct gets the value's model struct, with relationships based on struct and tag definitions
func (scope *Scope) GetModelStruct() *ModelStruct {
var modelStruct ModelStruct
// Scope value can't be nil
if scope.Value == nil {
return &modelStruct
}
reflectType := reflect.ValueOf(scope.Value).Type()
for reflectType.Kind() == reflect.Slice || reflectType.Kind() == reflect.Ptr {
reflectType = reflectType.Elem()
}
// Scope value needs to be a struct
if reflectType.Kind() != reflect.Struct {
return &modelStruct
}
// Get Cached model struct
isSingularTable := false
if scope.db != nil && scope.db.parent != nil {
scope.db.parent.RLock()
isSingularTable = scope.db.parent.singularTable
scope.db.parent.RUnlock()
}
hashKey := struct {
singularTable bool
reflectType reflect.Type
}{isSingularTable, reflectType}
if value, ok := modelStructsMap.Load(hashKey); ok && value != nil {
return value.(*ModelStruct)
}
modelStruct.ModelType = reflectType
// Get all fields
for i := 0; i < reflectType.NumField(); i++ {
if fieldStruct := reflectType.Field(i); ast.IsExported(fieldStruct.Name) {
field := &StructField{
Struct: fieldStruct,
Name: fieldStruct.Name,
Names: []string{fieldStruct.Name},
Tag: fieldStruct.Tag,
TagSettings: parseTagSetting(fieldStruct.Tag),
}
// is ignored field
if _, ok := field.TagSettingsGet("-"); ok {
field.IsIgnored = true
} else {
if _, ok := field.TagSettingsGet("PRIMARY_KEY"); ok {
field.IsPrimaryKey = true
modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, field)
}
if _, ok := field.TagSettingsGet("DEFAULT"); ok && !field.IsPrimaryKey {
field.HasDefaultValue = true
}
if _, ok := field.TagSettingsGet("AUTO_INCREMENT"); ok && !field.IsPrimaryKey {
field.HasDefaultValue = true
}
indirectType := fieldStruct.Type
for indirectType.Kind() == reflect.Ptr {
indirectType = indirectType.Elem()
}
fieldValue := reflect.New(indirectType).Interface()
if _, isScanner := fieldValue.(sql.Scanner); isScanner {
// is scanner
field.IsScanner, field.IsNormal = true, true
if indirectType.Kind() == reflect.Struct {
for i := 0; i < indirectType.NumField(); i++ {
for key, value := range parseTagSetting(indirectType.Field(i).Tag) {
if _, ok := field.TagSettingsGet(key); !ok {
field.TagSettingsSet(key, value)
}
}
}
}
} else if _, isTime := fieldValue.(*time.Time); isTime {
// is time
field.IsNormal = true
} else if _, ok := field.TagSettingsGet("EMBEDDED"); ok || fieldStruct.Anonymous {
// is embedded struct
for _, subField := range scope.New(fieldValue).GetModelStruct().StructFields {
subField = subField.clone()
subField.Names = append([]string{fieldStruct.Name}, subField.Names...)
if prefix, ok := field.TagSettingsGet("EMBEDDED_PREFIX"); ok {
subField.DBName = prefix + subField.DBName
}
if subField.IsPrimaryKey {
if _, ok := subField.TagSettingsGet("PRIMARY_KEY"); ok {
modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, subField)
} else {
subField.IsPrimaryKey = false
}
}
if subField.Relationship != nil && subField.Relationship.JoinTableHandler != nil {
if joinTableHandler, ok := subField.Relationship.JoinTableHandler.(*JoinTableHandler); ok {
newJoinTableHandler := &JoinTableHandler{}
newJoinTableHandler.Setup(subField.Relationship, joinTableHandler.TableName, reflectType, joinTableHandler.Destination.ModelType)
subField.Relationship.JoinTableHandler = newJoinTableHandler
}
}
modelStruct.StructFields = append(modelStruct.StructFields, subField)
}
continue
} else {
// build relationships
switch indirectType.Kind() {
case reflect.Slice:
defer func(field *StructField) {
var (
relationship = &Relationship{}
toScope = scope.New(reflect.New(field.Struct.Type).Interface())
foreignKeys []string
associationForeignKeys []string
elemType = field.Struct.Type
)
if foreignKey, _ := field.TagSettingsGet("FOREIGNKEY"); foreignKey != "" {
foreignKeys = strings.Split(foreignKey, ",")
}
if foreignKey, _ := field.TagSettingsGet("ASSOCIATION_FOREIGNKEY"); foreignKey != "" {
associationForeignKeys = strings.Split(foreignKey, ",")
} else if foreignKey, _ := field.TagSettingsGet("ASSOCIATIONFOREIGNKEY"); foreignKey != "" {
associationForeignKeys = strings.Split(foreignKey, ",")
}
for elemType.Kind() == reflect.Slice || elemType.Kind() == reflect.Ptr {
elemType = elemType.Elem()
}
if elemType.Kind() == reflect.Struct {
if many2many, _ := field.TagSettingsGet("MANY2MANY"); many2many != "" {
relationship.Kind = "many_to_many"
{ // Foreign Keys for Source
joinTableDBNames := []string{}
if foreignKey, _ := field.TagSettingsGet("JOINTABLE_FOREIGNKEY"); foreignKey != "" {
joinTableDBNames = strings.Split(foreignKey, ",")
}
// if no foreign keys defined with tag
if len(foreignKeys) == 0 {
for _, field := range modelStruct.PrimaryFields {
foreignKeys = append(foreignKeys, field.DBName)
}
}
for idx, foreignKey := range foreignKeys {
if foreignField := getForeignField(foreignKey, modelStruct.StructFields); foreignField != nil {
// source foreign keys (db names)
relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.DBName)
// setup join table foreign keys for source
if len(joinTableDBNames) > idx {
// if defined join table's foreign key
relationship.ForeignDBNames = append(relationship.ForeignDBNames, joinTableDBNames[idx])
} else {
defaultJointableForeignKey := ToColumnName(reflectType.Name()) + "_" + foreignField.DBName
relationship.ForeignDBNames = append(relationship.ForeignDBNames, defaultJointableForeignKey)
}
}
}
}
{ // Foreign Keys for Association (Destination)
associationJoinTableDBNames := []string{}
if foreignKey, _ := field.TagSettingsGet("ASSOCIATION_JOINTABLE_FOREIGNKEY"); foreignKey != "" {
associationJoinTableDBNames = strings.Split(foreignKey, ",")
}
// if no association foreign keys defined with tag
if len(associationForeignKeys) == 0 {
for _, field := range toScope.PrimaryFields() {
associationForeignKeys = append(associationForeignKeys, field.DBName)
}
}
for idx, name := range associationForeignKeys {
if field, ok := toScope.FieldByName(name); ok {
// association foreign keys (db names)
relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, field.DBName)
// setup join table foreign keys for association
if len(associationJoinTableDBNames) > idx {
relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, associationJoinTableDBNames[idx])
} else {
// join table foreign keys for association
joinTableDBName := ToColumnName(elemType.Name()) + "_" + field.DBName
relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, joinTableDBName)
}
}
}
}
joinTableHandler := JoinTableHandler{}
joinTableHandler.Setup(relationship, many2many, reflectType, elemType)
relationship.JoinTableHandler = &joinTableHandler
field.Relationship = relationship
} else {
// User has many comments, associationType is User, comments use UserID as the foreign key
var associationType = reflectType.Name()
var toFields = toScope.GetStructFields()
relationship.Kind = "has_many"
if polymorphic, _ := field.TagSettingsGet("POLYMORPHIC"); polymorphic != "" {
// Dog has many toys, tag polymorphic is Owner, then associationType is Owner
// Toy uses OwnerID, OwnerType ('dogs') as the foreign key
if polymorphicType := getForeignField(polymorphic+"Type", toFields); polymorphicType != nil {
associationType = polymorphic
relationship.PolymorphicType = polymorphicType.Name
relationship.PolymorphicDBName = polymorphicType.DBName
// if Dog has multiple sets of toys, set the name of the set (instead of the default 'dogs')
if value, ok := field.TagSettingsGet("POLYMORPHIC_VALUE"); ok {
relationship.PolymorphicValue = value
} else {
relationship.PolymorphicValue = scope.TableName()
}
polymorphicType.IsForeignKey = true
}
}
// if no foreign keys defined with tag
if len(foreignKeys) == 0 {
// if no association foreign keys defined with tag
if len(associationForeignKeys) == 0 {
for _, field := range modelStruct.PrimaryFields {
foreignKeys = append(foreignKeys, associationType+field.Name)
associationForeignKeys = append(associationForeignKeys, field.Name)
}
} else {
// generate foreign keys from defined association foreign keys
for _, scopeFieldName := range associationForeignKeys {
if foreignField := getForeignField(scopeFieldName, modelStruct.StructFields); foreignField != nil {
foreignKeys = append(foreignKeys, associationType+foreignField.Name)
associationForeignKeys = append(associationForeignKeys, foreignField.Name)
}
}
}
} else {
// generate association foreign keys from foreign keys
if len(associationForeignKeys) == 0 {
for _, foreignKey := range foreignKeys {
if strings.HasPrefix(foreignKey, associationType) {
associationForeignKey := strings.TrimPrefix(foreignKey, associationType)
if foreignField := getForeignField(associationForeignKey, modelStruct.StructFields); foreignField != nil {
associationForeignKeys = append(associationForeignKeys, associationForeignKey)
}
}
}
if len(associationForeignKeys) == 0 && len(foreignKeys) == 1 {
associationForeignKeys = []string{scope.PrimaryKey()}
}
} else if len(foreignKeys) != len(associationForeignKeys) {
scope.Err(errors.New("invalid foreign keys, should have same length"))
return
}
}
for idx, foreignKey := range foreignKeys {
if foreignField := getForeignField(foreignKey, toFields); foreignField != nil {
if associationField := getForeignField(associationForeignKeys[idx], modelStruct.StructFields); associationField != nil {
// mark field as foreignkey, use global lock to avoid race
structsLock.Lock()
foreignField.IsForeignKey = true
structsLock.Unlock()
// association foreign keys
relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, associationField.Name)
relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, associationField.DBName)
// association foreign keys
relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name)
relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName)
}
}
}
if len(relationship.ForeignFieldNames) != 0 {
field.Relationship = relationship
}
}
} else {
field.IsNormal = true
}
}(field)
case reflect.Struct:
defer func(field *StructField) {
var (
// user has one profile, associationType is User, profile uses UserID as the foreign key
// user belongs to profile, associationType is Profile, user uses ProfileID as the foreign key
associationType = reflectType.Name()
relationship = &Relationship{}
toScope = scope.New(reflect.New(field.Struct.Type).Interface())
toFields = toScope.GetStructFields()
tagForeignKeys []string
tagAssociationForeignKeys []string
)
if foreignKey, _ := field.TagSettingsGet("FOREIGNKEY"); foreignKey != "" {
tagForeignKeys = strings.Split(foreignKey, ",")
}
if foreignKey, _ := field.TagSettingsGet("ASSOCIATION_FOREIGNKEY"); foreignKey != "" {
tagAssociationForeignKeys = strings.Split(foreignKey, ",")
} else if foreignKey, _ := field.TagSettingsGet("ASSOCIATIONFOREIGNKEY"); foreignKey != "" {
tagAssociationForeignKeys = strings.Split(foreignKey, ",")
}
if polymorphic, _ := field.TagSettingsGet("POLYMORPHIC"); polymorphic != "" {
// Cat has one toy, tag polymorphic is Owner, then associationType is Owner
// Toy uses OwnerID, OwnerType ('cats') as the foreign key
if polymorphicType := getForeignField(polymorphic+"Type", toFields); polymorphicType != nil {
associationType = polymorphic
relationship.PolymorphicType = polymorphicType.Name
relationship.PolymorphicDBName = polymorphicType.DBName
// if Cat has several different types of toys, set a name for each (instead of the default 'cats')
if value, ok := field.TagSettingsGet("POLYMORPHIC_VALUE"); ok {
relationship.PolymorphicValue = value
} else {
relationship.PolymorphicValue = scope.TableName()
}
polymorphicType.IsForeignKey = true
}
}
// Has One
{
var foreignKeys = tagForeignKeys
var associationForeignKeys = tagAssociationForeignKeys
// if no foreign keys defined with tag
if len(foreignKeys) == 0 {
// if no association foreign keys defined with tag
if len(associationForeignKeys) == 0 {
for _, primaryField := range modelStruct.PrimaryFields {
foreignKeys = append(foreignKeys, associationType+primaryField.Name)
associationForeignKeys = append(associationForeignKeys, primaryField.Name)
}
} else {
// generate foreign keys from association foreign keys
for _, associationForeignKey := range tagAssociationForeignKeys {
if foreignField := getForeignField(associationForeignKey, modelStruct.StructFields); foreignField != nil {
foreignKeys = append(foreignKeys, associationType+foreignField.Name)
associationForeignKeys = append(associationForeignKeys, foreignField.Name)
}
}
}
} else {
// generate association foreign keys from foreign keys
if len(associationForeignKeys) == 0 {
for _, foreignKey := range foreignKeys {
if strings.HasPrefix(foreignKey, associationType) {
associationForeignKey := strings.TrimPrefix(foreignKey, associationType)
if foreignField := getForeignField(associationForeignKey, modelStruct.StructFields); foreignField != nil {
associationForeignKeys = append(associationForeignKeys, associationForeignKey)
}
}
}
if len(associationForeignKeys) == 0 && len(foreignKeys) == 1 {
associationForeignKeys = []string{scope.PrimaryKey()}
}
} else if len(foreignKeys) != len(associationForeignKeys) {
scope.Err(errors.New("invalid foreign keys, should have same length"))
return
}
}
for idx, foreignKey := range foreignKeys {
if foreignField := getForeignField(foreignKey, toFields); foreignField != nil {
if scopeField := getForeignField(associationForeignKeys[idx], modelStruct.StructFields); scopeField != nil {
// mark field as foreignkey, use global lock to avoid race
structsLock.Lock()
foreignField.IsForeignKey = true
structsLock.Unlock()
// association foreign keys
relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, scopeField.Name)
relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, scopeField.DBName)
// association foreign keys
relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name)
relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName)
}
}
}
}
if len(relationship.ForeignFieldNames) != 0 {
relationship.Kind = "has_one"
field.Relationship = relationship
} else {
var foreignKeys = tagForeignKeys
var associationForeignKeys = tagAssociationForeignKeys
if len(foreignKeys) == 0 {
// generate foreign keys & association foreign keys
if len(associationForeignKeys) == 0 {
for _, primaryField := range toScope.PrimaryFields() {
foreignKeys = append(foreignKeys, field.Name+primaryField.Name)
associationForeignKeys = append(associationForeignKeys, primaryField.Name)
}
} else {
// generate foreign keys with association foreign keys
for _, associationForeignKey := range associationForeignKeys {
if foreignField := getForeignField(associationForeignKey, toFields); foreignField != nil {
foreignKeys = append(foreignKeys, field.Name+foreignField.Name)
associationForeignKeys = append(associationForeignKeys, foreignField.Name)
}
}
}
} else {
// generate foreign keys & association foreign keys
if len(associationForeignKeys) == 0 {
for _, foreignKey := range foreignKeys {
if strings.HasPrefix(foreignKey, field.Name) {
associationForeignKey := strings.TrimPrefix(foreignKey, field.Name)
if foreignField := getForeignField(associationForeignKey, toFields); foreignField != nil {
associationForeignKeys = append(associationForeignKeys, associationForeignKey)
}
}
}
if len(associationForeignKeys) == 0 && len(foreignKeys) == 1 {
associationForeignKeys = []string{toScope.PrimaryKey()}
}
} else if len(foreignKeys) != len(associationForeignKeys) {
scope.Err(errors.New("invalid foreign keys, should have same length"))
return
}
}
for idx, foreignKey := range foreignKeys {
if foreignField := getForeignField(foreignKey, modelStruct.StructFields); foreignField != nil {
if associationField := getForeignField(associationForeignKeys[idx], toFields); associationField != nil {
// mark field as foreignkey, use global lock to avoid race
structsLock.Lock()
foreignField.IsForeignKey = true
structsLock.Unlock()
// association foreign keys
relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, associationField.Name)
relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, associationField.DBName)
// source foreign keys
relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name)
relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName)
}
}
}
if len(relationship.ForeignFieldNames) != 0 {
relationship.Kind = "belongs_to"
field.Relationship = relationship
}
}
}(field)
default:
field.IsNormal = true
}
}
}
// Even if it is ignored, it is still possible to decode a db value into the field
if value, ok := field.TagSettingsGet("COLUMN"); ok {
field.DBName = value
} else {
field.DBName = ToColumnName(fieldStruct.Name)
}
modelStruct.StructFields = append(modelStruct.StructFields, field)
}
}
if len(modelStruct.PrimaryFields) == 0 {
if field := getForeignField("id", modelStruct.StructFields); field != nil {
field.IsPrimaryKey = true
modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, field)
}
}
modelStructsMap.Store(hashKey, &modelStruct)
return &modelStruct
}
// GetStructFields gets the model's field structs
func (scope *Scope) GetStructFields() (fields []*StructField) {
return scope.GetModelStruct().StructFields
}
func parseTagSetting(tags reflect.StructTag) map[string]string {
setting := map[string]string{}
for _, str := range []string{tags.Get("sql"), tags.Get("gorm")} {
if str == "" {
continue
}
tags := strings.Split(str, ";")
for _, value := range tags {
v := strings.Split(value, ":")
k := strings.TrimSpace(strings.ToUpper(v[0]))
if len(v) >= 2 {
setting[k] = strings.Join(v[1:], ":")
} else {
setting[k] = k
}
}
}
return setting
}
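// Illustrative behaviour sketch: keys are upper-cased, `key:value` pairs keep
// their value, and bare flags map to themselves.
//
//	parseTagSetting(reflect.StructTag(`gorm:"column:user_name;primary_key"`))
//	// => map[COLUMN:user_name PRIMARY_KEY:PRIMARY_KEY]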

View File

@ -1,124 +0,0 @@
package gorm
import (
"bytes"
"strings"
)
// Namer is a function type which is given a string and returns a string
type Namer func(string) string
// NamingStrategy represents naming strategies
type NamingStrategy struct {
DB Namer
Table Namer
Column Namer
}
// TheNamingStrategy is initialized with defaultNamer
var TheNamingStrategy = &NamingStrategy{
DB: defaultNamer,
Table: defaultNamer,
Column: defaultNamer,
}
// AddNamingStrategy sets the naming strategy
func AddNamingStrategy(ns *NamingStrategy) {
if ns.DB == nil {
ns.DB = defaultNamer
}
if ns.Table == nil {
ns.Table = defaultNamer
}
if ns.Column == nil {
ns.Column = defaultNamer
}
TheNamingStrategy = ns
}
// DBName alters the given name by DB
func (ns *NamingStrategy) DBName(name string) string {
return ns.DB(name)
}
// TableName alters the given name by Table
func (ns *NamingStrategy) TableName(name string) string {
return ns.Table(name)
}
// ColumnName alters the given name by Column
func (ns *NamingStrategy) ColumnName(name string) string {
return ns.Column(name)
}
// ToDBName converts a string to its db name
func ToDBName(name string) string {
return TheNamingStrategy.DBName(name)
}
// ToTableName converts a string to its table name
func ToTableName(name string) string {
return TheNamingStrategy.TableName(name)
}
// ToColumnName converts a string to its column name
func ToColumnName(name string) string {
return TheNamingStrategy.ColumnName(name)
}
var smap = newSafeMap()
func defaultNamer(name string) string {
const (
lower = false
upper = true
)
if v := smap.Get(name); v != "" {
return v
}
if name == "" {
return ""
}
var (
value = commonInitialismsReplacer.Replace(name)
buf = bytes.NewBufferString("")
lastCase, currCase, nextCase, nextNumber bool
)
for i, v := range value[:len(value)-1] {
nextCase = bool(value[i+1] >= 'A' && value[i+1] <= 'Z')
nextNumber = bool(value[i+1] >= '0' && value[i+1] <= '9')
if i > 0 {
if currCase == upper {
if lastCase == upper && (nextCase == upper || nextNumber == upper) {
buf.WriteRune(v)
} else {
if value[i-1] != '_' && value[i+1] != '_' {
buf.WriteRune('_')
}
buf.WriteRune(v)
}
} else {
buf.WriteRune(v)
if i == len(value)-2 && (nextCase == upper && nextNumber == lower) {
buf.WriteRune('_')
}
}
} else {
currCase = upper
buf.WriteRune(v)
}
lastCase = currCase
currCase = nextCase
}
buf.WriteByte(value[len(value)-1])
s := strings.ToLower(buf.String())
smap.Set(name, s)
return s
}
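// Illustrative behaviour sketch:
//
//	defaultNamer("CreatedAt")   // => "created_at"
//	defaultNamer("HTTPRequest") // => "http_request" (common initialisms are normalized first)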

1421 vendor/github.com/jinzhu/gorm/scope.go (generated, vendored)

File diff suppressed because it is too large

View File

@ -1,153 +0,0 @@
package gorm
import (
"fmt"
)
type search struct {
db *DB
whereConditions []map[string]interface{}
orConditions []map[string]interface{}
notConditions []map[string]interface{}
havingConditions []map[string]interface{}
joinConditions []map[string]interface{}
initAttrs []interface{}
assignAttrs []interface{}
selects map[string]interface{}
omits []string
orders []interface{}
preload []searchPreload
offset interface{}
limit interface{}
group string
tableName string
raw bool
Unscoped bool
ignoreOrderQuery bool
}
type searchPreload struct {
schema string
conditions []interface{}
}
func (s *search) clone() *search {
clone := *s
return &clone
}
func (s *search) Where(query interface{}, values ...interface{}) *search {
s.whereConditions = append(s.whereConditions, map[string]interface{}{"query": query, "args": values})
return s
}
func (s *search) Not(query interface{}, values ...interface{}) *search {
s.notConditions = append(s.notConditions, map[string]interface{}{"query": query, "args": values})
return s
}
func (s *search) Or(query interface{}, values ...interface{}) *search {
s.orConditions = append(s.orConditions, map[string]interface{}{"query": query, "args": values})
return s
}
func (s *search) Attrs(attrs ...interface{}) *search {
s.initAttrs = append(s.initAttrs, toSearchableMap(attrs...))
return s
}
func (s *search) Assign(attrs ...interface{}) *search {
s.assignAttrs = append(s.assignAttrs, toSearchableMap(attrs...))
return s
}
func (s *search) Order(value interface{}, reorder ...bool) *search {
if len(reorder) > 0 && reorder[0] {
s.orders = []interface{}{}
}
if value != nil && value != "" {
s.orders = append(s.orders, value)
}
return s
}
func (s *search) Select(query interface{}, args ...interface{}) *search {
s.selects = map[string]interface{}{"query": query, "args": args}
return s
}
func (s *search) Omit(columns ...string) *search {
s.omits = columns
return s
}
func (s *search) Limit(limit interface{}) *search {
s.limit = limit
return s
}
func (s *search) Offset(offset interface{}) *search {
s.offset = offset
return s
}
func (s *search) Group(query string) *search {
s.group = s.getInterfaceAsSQL(query)
return s
}
func (s *search) Having(query interface{}, values ...interface{}) *search {
if val, ok := query.(*SqlExpr); ok {
s.havingConditions = append(s.havingConditions, map[string]interface{}{"query": val.expr, "args": val.args})
} else {
s.havingConditions = append(s.havingConditions, map[string]interface{}{"query": query, "args": values})
}
return s
}
func (s *search) Joins(query string, values ...interface{}) *search {
s.joinConditions = append(s.joinConditions, map[string]interface{}{"query": query, "args": values})
return s
}
func (s *search) Preload(schema string, values ...interface{}) *search {
var preloads []searchPreload
for _, preload := range s.preload {
if preload.schema != schema {
preloads = append(preloads, preload)
}
}
preloads = append(preloads, searchPreload{schema, values})
s.preload = preloads
return s
}
func (s *search) Raw(b bool) *search {
s.raw = b
return s
}
func (s *search) unscoped() *search {
s.Unscoped = true
return s
}
func (s *search) Table(name string) *search {
s.tableName = name
return s
}
func (s *search) getInterfaceAsSQL(value interface{}) (str string) {
switch value.(type) {
case string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
str = fmt.Sprintf("%v", value)
default:
s.db.AddError(ErrInvalidSQL)
}
if str == "-1" {
return ""
}
return
}

View File

@ -1,5 +0,0 @@
#!/bin/bash
dialects=("postgres" "mysql" "mssql" "sqlite")
for dialect in "${dialects[@]}" ; do
DEBUG=false GORM_DIALECT=${dialect} go test
done

View File

@ -1,226 +0,0 @@
package gorm
import (
"database/sql/driver"
"fmt"
"reflect"
"regexp"
"runtime"
"strings"
"sync"
"time"
)
// NowFunc returns current time, this function is exported in order to be able
// to give the flexibility to the developer to customize it according to their
// needs, e.g:
// gorm.NowFunc = func() time.Time {
// return time.Now().UTC()
// }
var NowFunc = func() time.Time {
return time.Now()
}
// Copied from golint
var commonInitialisms = []string{"API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "LHS", "QPS", "RAM", "RHS", "RPC", "SLA", "SMTP", "SSH", "TLS", "TTL", "UID", "UI", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XSRF", "XSS"}
var commonInitialismsReplacer *strings.Replacer
var goSrcRegexp = regexp.MustCompile(`jinzhu/gorm(@.*)?/.*.go`)
var goTestRegexp = regexp.MustCompile(`jinzhu/gorm(@.*)?/.*test.go`)
func init() {
var commonInitialismsForReplacer []string
for _, initialism := range commonInitialisms {
commonInitialismsForReplacer = append(commonInitialismsForReplacer, initialism, strings.Title(strings.ToLower(initialism)))
}
commonInitialismsReplacer = strings.NewReplacer(commonInitialismsForReplacer...)
}
type safeMap struct {
m map[string]string
l *sync.RWMutex
}
func (s *safeMap) Set(key string, value string) {
s.l.Lock()
defer s.l.Unlock()
s.m[key] = value
}
func (s *safeMap) Get(key string) string {
s.l.RLock()
defer s.l.RUnlock()
return s.m[key]
}
func newSafeMap() *safeMap {
return &safeMap{l: new(sync.RWMutex), m: make(map[string]string)}
}
// SQL expression
type SqlExpr struct {
expr string
args []interface{}
}
// Expr generates a raw SQL expression, for example:
// DB.Model(&product).Update("price", gorm.Expr("price * ? + ?", 2, 100))
func Expr(expression string, args ...interface{}) *SqlExpr {
return &SqlExpr{expr: expression, args: args}
}
func indirect(reflectValue reflect.Value) reflect.Value {
for reflectValue.Kind() == reflect.Ptr {
reflectValue = reflectValue.Elem()
}
return reflectValue
}
func toQueryMarks(primaryValues [][]interface{}) string {
var results []string
for _, primaryValue := range primaryValues {
var marks []string
for range primaryValue {
marks = append(marks, "?")
}
if len(marks) > 1 {
results = append(results, fmt.Sprintf("(%v)", strings.Join(marks, ",")))
} else {
results = append(results, strings.Join(marks, ""))
}
}
return strings.Join(results, ",")
}
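// Illustrative behaviour sketch:
//
//	toQueryMarks([][]interface{}{{1, 2}, {3, 4}}) // => "(?,?),(?,?)"
//	toQueryMarks([][]interface{}{{1}, {2}})       // => "?,?"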
func toQueryCondition(scope *Scope, columns []string) string {
var newColumns []string
for _, column := range columns {
newColumns = append(newColumns, scope.Quote(column))
}
if len(columns) > 1 {
return fmt.Sprintf("(%v)", strings.Join(newColumns, ","))
}
return strings.Join(newColumns, ",")
}
func toQueryValues(values [][]interface{}) (results []interface{}) {
for _, value := range values {
for _, v := range value {
results = append(results, v)
}
}
return
}
func fileWithLineNum() string {
for i := 2; i < 15; i++ {
_, file, line, ok := runtime.Caller(i)
if ok && (!goSrcRegexp.MatchString(file) || goTestRegexp.MatchString(file)) {
return fmt.Sprintf("%v:%v", file, line)
}
}
return ""
}
func isBlank(value reflect.Value) bool {
switch value.Kind() {
case reflect.String:
return value.Len() == 0
case reflect.Bool:
return !value.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return value.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return value.Uint() == 0
case reflect.Float32, reflect.Float64:
return value.Float() == 0
case reflect.Interface, reflect.Ptr:
return value.IsNil()
}
return reflect.DeepEqual(value.Interface(), reflect.Zero(value.Type()).Interface())
}
func toSearchableMap(attrs ...interface{}) (result interface{}) {
if len(attrs) > 1 {
if str, ok := attrs[0].(string); ok {
result = map[string]interface{}{str: attrs[1]}
}
} else if len(attrs) == 1 {
if attr, ok := attrs[0].(map[string]interface{}); ok {
result = attr
}
if attr, ok := attrs[0].(interface{}); ok {
result = attr
}
}
return
}
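// Illustrative behaviour sketch (User is an assumed model):
//
//	toSearchableMap("name", "jinzhu") // => map[string]interface{}{"name": "jinzhu"}
//	toSearchableMap(User{Name: "x"})  // => User{Name: "x"}, passed through unchanged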
func equalAsString(a interface{}, b interface{}) bool {
return toString(a) == toString(b)
}
func toString(str interface{}) string {
if values, ok := str.([]interface{}); ok {
var results []string
for _, value := range values {
results = append(results, toString(value))
}
return strings.Join(results, "_")
} else if bytes, ok := str.([]byte); ok {
return string(bytes)
} else if reflectValue := reflect.Indirect(reflect.ValueOf(str)); reflectValue.IsValid() {
return fmt.Sprintf("%v", reflectValue.Interface())
}
return ""
}
func makeSlice(elemType reflect.Type) interface{} {
if elemType.Kind() == reflect.Slice {
elemType = elemType.Elem()
}
sliceType := reflect.SliceOf(elemType)
slice := reflect.New(sliceType)
slice.Elem().Set(reflect.MakeSlice(sliceType, 0, 0))
return slice.Interface()
}
func strInSlice(a string, list []string) bool {
for _, b := range list {
if b == a {
return true
}
}
return false
}
// getValueFromFields returns the given fields' values
func getValueFromFields(value reflect.Value, fieldNames []string) (results []interface{}) {
// If value is a nil pointer, Indirect returns a zero Value!
// Therefore we need to check for a zero value,
// as FieldByName could panic
if indirectValue := reflect.Indirect(value); indirectValue.IsValid() {
for _, fieldName := range fieldNames {
if fieldValue := reflect.Indirect(indirectValue.FieldByName(fieldName)); fieldValue.IsValid() {
result := fieldValue.Interface()
if r, ok := result.(driver.Valuer); ok {
result, _ = r.Value()
}
results = append(results, result)
}
}
}
return
}
func addExtraSpaceIfExist(str string) string {
if str != "" {
return " " + str
}
return ""
}

View File

@ -1,154 +0,0 @@
# use the default golang container from Docker Hub
box: golang
services:
- name: mariadb
id: mariadb:latest
env:
MYSQL_DATABASE: gorm
MYSQL_USER: gorm
MYSQL_PASSWORD: gorm
MYSQL_RANDOM_ROOT_PASSWORD: "yes"
- name: mysql
id: mysql:latest
env:
MYSQL_DATABASE: gorm
MYSQL_USER: gorm
MYSQL_PASSWORD: gorm
MYSQL_RANDOM_ROOT_PASSWORD: "yes"
- name: mysql57
id: mysql:5.7
env:
MYSQL_DATABASE: gorm
MYSQL_USER: gorm
MYSQL_PASSWORD: gorm
MYSQL_RANDOM_ROOT_PASSWORD: "yes"
- name: mysql56
id: mysql:5.6
env:
MYSQL_DATABASE: gorm
MYSQL_USER: gorm
MYSQL_PASSWORD: gorm
MYSQL_RANDOM_ROOT_PASSWORD: "yes"
- name: postgres
id: postgres:latest
env:
POSTGRES_USER: gorm
POSTGRES_PASSWORD: gorm
POSTGRES_DB: gorm
- name: postgres96
id: postgres:9.6
env:
POSTGRES_USER: gorm
POSTGRES_PASSWORD: gorm
POSTGRES_DB: gorm
- name: postgres95
id: postgres:9.5
env:
POSTGRES_USER: gorm
POSTGRES_PASSWORD: gorm
POSTGRES_DB: gorm
- name: postgres94
id: postgres:9.4
env:
POSTGRES_USER: gorm
POSTGRES_PASSWORD: gorm
POSTGRES_DB: gorm
- name: postgres93
id: postgres:9.3
env:
POSTGRES_USER: gorm
POSTGRES_PASSWORD: gorm
POSTGRES_DB: gorm
- name: mssql
id: mcmoe/mssqldocker:latest
env:
ACCEPT_EULA: Y
SA_PASSWORD: LoremIpsum86
MSSQL_DB: gorm
MSSQL_USER: gorm
MSSQL_PASSWORD: LoremIpsum86
# The steps that will be executed in the build pipeline
build:
# The steps that will be executed on build
steps:
# Sets the go workspace and places your package
# at the right place in the workspace tree
- setup-go-workspace
# Gets the dependencies
- script:
name: go get
code: |
cd $WERCKER_SOURCE_DIR
go version
go get -t -v ./...
# Build the project
- script:
name: go build
code: |
go build ./...
# Test the project
- script:
name: test sqlite
code: |
go test -race -v ./...
- script:
name: test mariadb
code: |
GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mariadb:3306)/gorm?charset=utf8&parseTime=True" go test -race ./...
- script:
name: test mysql
code: |
GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mysql:3306)/gorm?charset=utf8&parseTime=True" go test -race ./...
- script:
name: test mysql5.7
code: |
GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mysql57:3306)/gorm?charset=utf8&parseTime=True" go test -race ./...
- script:
name: test mysql5.6
code: |
GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mysql56:3306)/gorm?charset=utf8&parseTime=True" go test -race ./...
- script:
name: test postgres
code: |
GORM_DIALECT=postgres GORM_DSN="host=postgres user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test -race ./...
- script:
name: test postgres96
code: |
GORM_DIALECT=postgres GORM_DSN="host=postgres96 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test -race ./...
- script:
name: test postgres95
code: |
GORM_DIALECT=postgres GORM_DSN="host=postgres95 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test -race ./...
- script:
name: test postgres94
code: |
GORM_DIALECT=postgres GORM_DSN="host=postgres94 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test -race ./...
- script:
name: test postgres93
code: |
GORM_DIALECT=postgres GORM_DSN="host=postgres93 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test -race ./...
- script:
name: test mssql
code: |
GORM_DIALECT=mssql GORM_DSN="sqlserver://gorm:LoremIpsum86@mssql:1433?database=gorm" go test -race ./...
- script:
name: codecov
code: |
go test -race -coverprofile=coverage.txt -covermode=atomic ./...
bash <(curl -s https://codecov.io/bash)

View File

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015 - Jinzhu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -1,55 +0,0 @@
# Inflection
Inflection pluralizes and singularizes English nouns
[![wercker status](https://app.wercker.com/status/f8c7432b097d1f4ce636879670be0930/s/master "wercker status")](https://app.wercker.com/project/byKey/f8c7432b097d1f4ce636879670be0930)
## Basic Usage
```go
inflection.Plural("person") => "people"
inflection.Plural("Person") => "People"
inflection.Plural("PERSON") => "PEOPLE"
inflection.Plural("bus") => "buses"
inflection.Plural("BUS") => "BUSES"
inflection.Plural("Bus") => "Buses"
inflection.Singular("people") => "person"
inflection.Singular("People") => "Person"
inflection.Singular("PEOPLE") => "PERSON"
inflection.Singular("buses") => "bus"
inflection.Singular("BUSES") => "BUS"
inflection.Singular("Buses") => "Bus"
inflection.Plural("FancyPerson") => "FancyPeople"
inflection.Singular("FancyPeople") => "FancyPerson"
```
## Register Rules
Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb)
If you want to register more rules, follow:
```
inflection.AddUncountable("fish")
inflection.AddIrregular("person", "people")
inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses"
inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS"
```
## Contributing
You can help to make the project better, check out [http://gorm.io/contribute.html](http://gorm.io/contribute.html) for things you can do.
## Author
**jinzhu**
* <http://github.com/jinzhu>
* <wosmvp@gmail.com>
* <http://twitter.com/zhangjinzhu>
## License
Released under the [MIT License](http://www.opensource.org/licenses/MIT).

View File

@ -1 +0,0 @@
module github.com/jinzhu/inflection

View File

@ -1,273 +0,0 @@
/*
Package inflection pluralizes and singularizes English nouns.
inflection.Plural("person") => "people"
inflection.Plural("Person") => "People"
inflection.Plural("PERSON") => "PEOPLE"
inflection.Singular("people") => "person"
inflection.Singular("People") => "Person"
inflection.Singular("PEOPLE") => "PERSON"
inflection.Plural("FancyPerson") => "FancydPeople"
inflection.Singular("FancyPeople") => "FancydPerson"
Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb)
If you want to register more rules, follow:
inflection.AddUncountable("fish")
inflection.AddIrregular("person", "people")
inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses"
inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS"
*/
package inflection
import (
"regexp"
"strings"
)
type inflection struct {
regexp *regexp.Regexp
replace string
}
// Regular is a regexp find replace inflection
type Regular struct {
find string
replace string
}
// Irregular is a hard replace inflection,
// containing both singular and plural forms
type Irregular struct {
singular string
plural string
}
// RegularSlice is a slice of Regular inflections
type RegularSlice []Regular
// IrregularSlice is a slice of Irregular inflections
type IrregularSlice []Irregular
var pluralInflections = RegularSlice{
{"([a-z])$", "${1}s"},
{"s$", "s"},
{"^(ax|test)is$", "${1}es"},
{"(octop|vir)us$", "${1}i"},
{"(octop|vir)i$", "${1}i"},
{"(alias|status)$", "${1}es"},
{"(bu)s$", "${1}ses"},
{"(buffal|tomat)o$", "${1}oes"},
{"([ti])um$", "${1}a"},
{"([ti])a$", "${1}a"},
{"sis$", "ses"},
{"(?:([^f])fe|([lr])f)$", "${1}${2}ves"},
{"(hive)$", "${1}s"},
{"([^aeiouy]|qu)y$", "${1}ies"},
{"(x|ch|ss|sh)$", "${1}es"},
{"(matr|vert|ind)(?:ix|ex)$", "${1}ices"},
{"^(m|l)ouse$", "${1}ice"},
{"^(m|l)ice$", "${1}ice"},
{"^(ox)$", "${1}en"},
{"^(oxen)$", "${1}"},
{"(quiz)$", "${1}zes"},
}
var singularInflections = RegularSlice{
{"s$", ""},
{"(ss)$", "${1}"},
{"(n)ews$", "${1}ews"},
{"([ti])a$", "${1}um"},
{"((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)(sis|ses)$", "${1}sis"},
{"(^analy)(sis|ses)$", "${1}sis"},
{"([^f])ves$", "${1}fe"},
{"(hive)s$", "${1}"},
{"(tive)s$", "${1}"},
{"([lr])ves$", "${1}f"},
{"([^aeiouy]|qu)ies$", "${1}y"},
{"(s)eries$", "${1}eries"},
{"(m)ovies$", "${1}ovie"},
{"(c)ookies$", "${1}ookie"},
{"(x|ch|ss|sh)es$", "${1}"},
{"^(m|l)ice$", "${1}ouse"},
{"(bus)(es)?$", "${1}"},
{"(o)es$", "${1}"},
{"(shoe)s$", "${1}"},
{"(cris|test)(is|es)$", "${1}is"},
{"^(a)x[ie]s$", "${1}xis"},
{"(octop|vir)(us|i)$", "${1}us"},
{"(alias|status)(es)?$", "${1}"},
{"^(ox)en", "${1}"},
{"(vert|ind)ices$", "${1}ex"},
{"(matr)ices$", "${1}ix"},
{"(quiz)zes$", "${1}"},
{"(database)s$", "${1}"},
}
var irregularInflections = IrregularSlice{
{"person", "people"},
{"man", "men"},
{"child", "children"},
{"sex", "sexes"},
{"move", "moves"},
{"mombie", "mombies"},
}
var uncountableInflections = []string{"equipment", "information", "rice", "money", "species", "series", "fish", "sheep", "jeans", "police"}
var compiledPluralMaps []inflection
var compiledSingularMaps []inflection
func compile() {
compiledPluralMaps = []inflection{}
compiledSingularMaps = []inflection{}
for _, uncountable := range uncountableInflections {
inf := inflection{
regexp: regexp.MustCompile("^(?i)(" + uncountable + ")$"),
replace: "${1}",
}
compiledPluralMaps = append(compiledPluralMaps, inf)
compiledSingularMaps = append(compiledSingularMaps, inf)
}
for _, value := range irregularInflections {
infs := []inflection{
inflection{regexp: regexp.MustCompile(strings.ToUpper(value.singular) + "$"), replace: strings.ToUpper(value.plural)},
inflection{regexp: regexp.MustCompile(strings.Title(value.singular) + "$"), replace: strings.Title(value.plural)},
inflection{regexp: regexp.MustCompile(value.singular + "$"), replace: value.plural},
}
compiledPluralMaps = append(compiledPluralMaps, infs...)
}
for _, value := range irregularInflections {
infs := []inflection{
inflection{regexp: regexp.MustCompile(strings.ToUpper(value.plural) + "$"), replace: strings.ToUpper(value.singular)},
inflection{regexp: regexp.MustCompile(strings.Title(value.plural) + "$"), replace: strings.Title(value.singular)},
inflection{regexp: regexp.MustCompile(value.plural + "$"), replace: value.singular},
}
compiledSingularMaps = append(compiledSingularMaps, infs...)
}
for i := len(pluralInflections) - 1; i >= 0; i-- {
value := pluralInflections[i]
infs := []inflection{
inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)},
inflection{regexp: regexp.MustCompile(value.find), replace: value.replace},
inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace},
}
compiledPluralMaps = append(compiledPluralMaps, infs...)
}
for i := len(singularInflections) - 1; i >= 0; i-- {
value := singularInflections[i]
infs := []inflection{
inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)},
inflection{regexp: regexp.MustCompile(value.find), replace: value.replace},
inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace},
}
compiledSingularMaps = append(compiledSingularMaps, infs...)
}
}
func init() {
compile()
}
// AddPlural adds a plural inflection
func AddPlural(find, replace string) {
pluralInflections = append(pluralInflections, Regular{find, replace})
compile()
}
// AddSingular adds a singular inflection
func AddSingular(find, replace string) {
singularInflections = append(singularInflections, Regular{find, replace})
compile()
}
// AddIrregular adds an irregular inflection
func AddIrregular(singular, plural string) {
irregularInflections = append(irregularInflections, Irregular{singular, plural})
compile()
}
// AddUncountable adds an uncountable inflection
func AddUncountable(values ...string) {
uncountableInflections = append(uncountableInflections, values...)
compile()
}
// GetPlural retrieves the plural inflection values
func GetPlural() RegularSlice {
plurals := make(RegularSlice, len(pluralInflections))
copy(plurals, pluralInflections)
return plurals
}
// GetSingular retrieves the singular inflection values
func GetSingular() RegularSlice {
singulars := make(RegularSlice, len(singularInflections))
copy(singulars, singularInflections)
return singulars
}
// GetIrregular retrieves the irregular inflection values
func GetIrregular() IrregularSlice {
irregular := make(IrregularSlice, len(irregularInflections))
copy(irregular, irregularInflections)
return irregular
}
// GetUncountable retrieves the uncountable inflection values
func GetUncountable() []string {
uncountables := make([]string, len(uncountableInflections))
copy(uncountables, uncountableInflections)
return uncountables
}
// SetPlural sets the plural inflections slice
func SetPlural(inflections RegularSlice) {
pluralInflections = inflections
compile()
}
// SetSingular sets the singular inflections slice
func SetSingular(inflections RegularSlice) {
singularInflections = inflections
compile()
}
// SetIrregular sets the irregular inflections slice
func SetIrregular(inflections IrregularSlice) {
irregularInflections = inflections
compile()
}
// SetUncountable sets the uncountable inflections slice
func SetUncountable(inflections []string) {
uncountableInflections = inflections
compile()
}
// Plural converts a word to its plural form
func Plural(str string) string {
for _, inflection := range compiledPluralMaps {
if inflection.regexp.MatchString(str) {
return inflection.regexp.ReplaceAllString(str, inflection.replace)
}
}
return str
}
// Singular converts a word to its singular form
func Singular(str string) string {
for _, inflection := range compiledSingularMaps {
if inflection.regexp.MatchString(str) {
return inflection.regexp.ReplaceAllString(str, inflection.replace)
}
}
return str
}

View File

@ -1,23 +0,0 @@
box: golang
build:
steps:
- setup-go-workspace
# Gets the dependencies
- script:
name: go get
code: |
go get
# Build the project
- script:
name: go build
code: |
go build ./...
# Test the project
- script:
name: go test
code: |
go test ./...

View File

@ -1,27 +0,0 @@
# vim: ft=yaml sw=2 ts=2
language: go
# enable database services
services:
- mysql
- postgresql
# create test database
before_install:
- mysql -e 'CREATE DATABASE IF NOT EXISTS sqlxtest;'
- psql -c 'create database sqlxtest;' -U postgres
- go get github.com/mattn/goveralls
- export SQLX_MYSQL_DSN="travis:@/sqlxtest?parseTime=true"
- export SQLX_POSTGRES_DSN="postgres://postgres:@localhost/sqlxtest?sslmode=disable"
- export SQLX_SQLITE_DSN="$HOME/sqlxtest.db"
# go versions to test
go:
- "1.8"
- "1.9"
- "1.10.x"
# run tests w/ coverage
script:
- travis_retry $GOPATH/bin/goveralls -service=travis-ci

View File

@ -1,23 +0,0 @@
Copyright (c) 2013, Jason Moiron
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

View File

@ -1,187 +0,0 @@
# sqlx
[![Build Status](https://travis-ci.org/jmoiron/sqlx.svg?branch=master)](https://travis-ci.org/jmoiron/sqlx) [![Coverage Status](https://coveralls.io/repos/github/jmoiron/sqlx/badge.svg?branch=master)](https://coveralls.io/github/jmoiron/sqlx?branch=master) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE)
sqlx is a library which provides a set of extensions on top of Go's standard
`database/sql` library. The sqlx versions of `sql.DB`, `sql.Tx`, `sql.Stmt`,
et al. all leave the underlying interfaces untouched, so that their interfaces
are a superset of the standard ones. This makes it relatively painless to
integrate existing codebases using database/sql with sqlx.
Major additional concepts are:
* Marshal rows into structs (with embedded struct support), maps, and slices
* Named parameter support including prepared statements
* `Get` and `Select` to go quickly from query to struct/slice
In addition to the [godoc API documentation](http://godoc.org/github.com/jmoiron/sqlx),
there is also some [standard documentation](http://jmoiron.github.io/sqlx/) that
explains how to use `database/sql` along with sqlx.
## Recent Changes
* The [introduction](https://github.com/jmoiron/sqlx/pull/387) of `sql.ColumnType` sets the required minimum Go version to 1.8.
* sqlx/types.JsonText has been renamed to JSONText to follow Go naming conventions.
This breaks backwards compatibility, but it's in a way that is trivially fixable
(`s/JsonText/JSONText/g`). The `types` package is both experimental and not in
active development currently.
* Using Go 1.6 and below with `types.JSONText` and `types.GzippedText` can be _potentially unsafe_, **especially** when used with common auto-scan sqlx idioms like `Select` and `Get`. See [golang bug #13905](https://github.com/golang/go/issues/13905).
### Backwards Compatibility
There is no Go1-like promise of absolute stability, but I take the issue seriously
and will maintain the library in a compatible state unless vital bugs prevent me
from doing so. Since [#59](https://github.com/jmoiron/sqlx/issues/59) and
[#60](https://github.com/jmoiron/sqlx/issues/60) necessitated breaking behavior,
a wider API cleanup was done at the time of fixing. It's possible this will happen
in the future; if it does, a git tag will be provided for users requiring the old
behavior to continue to use it until such a time as they can migrate.
## install
go get github.com/jmoiron/sqlx
## issues
Row headers can be ambiguous (`SELECT 1 AS a, 2 AS a`), and the result of
`Columns()` does not fully qualify column names in queries like:
```sql
SELECT a.id, a.name, b.id, b.name FROM foos AS a JOIN foos AS b ON a.parent = b.id;
```
making a struct or map destination ambiguous. Use `AS` in your queries
to give columns distinct names, `rows.Scan` to scan them manually, or
`SliceScan` to get a slice of results.
## usage
Below is an example which shows some common use cases for sqlx. Check
[sqlx_test.go](https://github.com/jmoiron/sqlx/blob/master/sqlx_test.go) for more
usage.
```go
package main
import (
"database/sql"
"fmt"
"log"
_ "github.com/lib/pq"
"github.com/jmoiron/sqlx"
)
var schema = `
CREATE TABLE person (
first_name text,
last_name text,
email text
);
CREATE TABLE place (
country text,
city text NULL,
telcode integer
)`
type Person struct {
FirstName string `db:"first_name"`
LastName string `db:"last_name"`
Email string
}
type Place struct {
Country string
City sql.NullString
TelCode int
}
func main() {
// this Pings the database trying to connect and returns an error on failure
// use sqlx.Open() for sql.Open() semantics
db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable")
if err != nil {
log.Fatalln(err)
}
// exec the schema or fail; multi-statement Exec behavior varies between
// database drivers; pq will exec them all, sqlite3 won't, ymmv
db.MustExec(schema)
tx := db.MustBegin()
tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "Jason", "Moiron", "jmoiron@jmoiron.net")
tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "John", "Doe", "johndoeDNE@gmail.net")
tx.MustExec("INSERT INTO place (country, city, telcode) VALUES ($1, $2, $3)", "United States", "New York", "1")
tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Hong Kong", "852")
tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Singapore", "65")
// Named queries can use structs, so if you have an existing struct (e.g. person := &Person{}) that you have populated, you can pass it in as &person
tx.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", &Person{"Jane", "Citizen", "jane.citzen@example.com"})
tx.Commit()
// Query the database, storing results in a []Person
people := []Person{}
db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC")
jason, john := people[0], people[1]
fmt.Printf("%#v\n%#v", jason, john)
// Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"}
// Person{FirstName:"John", LastName:"Doe", Email:"johndoeDNE@gmail.net"}
// You can also get a single result, a la QueryRow
jason = Person{}
err = db.Get(&jason, "SELECT * FROM person WHERE first_name=$1", "Jason")
fmt.Printf("%#v\n", jason)
// Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"}
// if you have null fields and use SELECT *, you must use sql.Null* in your struct
places := []Place{}
err = db.Select(&places, "SELECT * FROM place ORDER BY telcode ASC")
if err != nil {
fmt.Println(err)
return
}
usa, singsing, honkers := places[0], places[1], places[2]
fmt.Printf("%#v\n%#v\n%#v\n", usa, singsing, honkers)
// Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1}
// Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65}
// Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852}
// Loop through rows using only one struct
place := Place{}
rows, err := db.Queryx("SELECT * FROM place")
for rows.Next() {
err := rows.StructScan(&place)
if err != nil {
log.Fatalln(err)
}
fmt.Printf("%#v\n", place)
}
// Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1}
// Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852}
// Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65}
// Named queries, using `:name` as the bindvar. Automatic bindvar support
// which takes into account the db type, based on the driverName passed to sqlx.Open/Connect
_, err = db.NamedExec(`INSERT INTO person (first_name,last_name,email) VALUES (:first,:last,:email)`,
map[string]interface{}{
"first": "Bin",
"last": "Smuth",
"email": "bensmith@allblacks.nz",
})
// Selects Mr. Smith from the database
rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:fn`, map[string]interface{}{"fn": "Bin"})
// Named queries can also use structs. Their bind names follow the same rules
// as the name -> db mapping, so struct fields are lowercased and the `db` tag
// is taken into consideration.
rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:first_name`, jason)
}
```

View File

@ -1,217 +0,0 @@
package sqlx
import (
"bytes"
"database/sql/driver"
"errors"
"reflect"
"strconv"
"strings"
"github.com/jmoiron/sqlx/reflectx"
)
// Bindvar types supported by Rebind, BindMap and BindStruct.
const (
UNKNOWN = iota
QUESTION
DOLLAR
NAMED
AT
)
// BindType returns the bindtype for a given database given a drivername.
func BindType(driverName string) int {
switch driverName {
case "postgres", "pgx", "pq-timeouts", "cloudsqlpostgres":
return DOLLAR
case "mysql":
return QUESTION
case "sqlite3":
return QUESTION
case "oci8", "ora", "goracle":
return NAMED
case "sqlserver":
return AT
}
return UNKNOWN
}
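For illustration, a quick sketch of the mapping BindType implements (these constants are exported by sqlx, as defined above):
fmt.Println(sqlx.BindType("postgres") == sqlx.DOLLAR) // true
fmt.Println(sqlx.BindType("mysql") == sqlx.QUESTION) // true
fmt.Println(sqlx.BindType("sqlserver") == sqlx.AT) // true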
// FIXME: this should be able to be tolerant of escaped ?'s in queries without
// losing much speed, and should be, to avoid confusion.
// Rebind a query from the default bindtype (QUESTION) to the target bindtype.
func Rebind(bindType int, query string) string {
switch bindType {
case QUESTION, UNKNOWN:
return query
}
// Add enough space for 10 params before we have to allocate
rqb := make([]byte, 0, len(query)+10)
var i, j int
for i = strings.Index(query, "?"); i != -1; i = strings.Index(query, "?") {
rqb = append(rqb, query[:i]...)
switch bindType {
case DOLLAR:
rqb = append(rqb, '$')
case NAMED:
rqb = append(rqb, ':', 'a', 'r', 'g')
case AT:
rqb = append(rqb, '@', 'p')
}
j++
rqb = strconv.AppendInt(rqb, int64(j), 10)
query = query[i+1:]
}
return string(append(rqb, query...))
}
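A minimal sketch of Rebind in use (sqlx.Rebind and sqlx.DOLLAR are the exported names defined in this file):
q := sqlx.Rebind(sqlx.DOLLAR, "SELECT * FROM person WHERE first_name = ? AND last_name = ?")
// q == "SELECT * FROM person WHERE first_name = $1 AND last_name = $2"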
// Experimental implementation of Rebind which uses a bytes.Buffer. The code is
// much simpler and should be more resistant to odd unicode, but it is twice as
// slow. Kept here for benchmarking purposes and to possibly replace Rebind if
// problems arise with its somewhat naive handling of unicode.
func rebindBuff(bindType int, query string) string {
if bindType != DOLLAR {
return query
}
b := make([]byte, 0, len(query))
rqb := bytes.NewBuffer(b)
j := 1
for _, r := range query {
if r == '?' {
rqb.WriteRune('$')
rqb.WriteString(strconv.Itoa(j))
j++
} else {
rqb.WriteRune(r)
}
}
return rqb.String()
}
// In expands slice values in args, returning the modified query string
// and a new arg list that can be executed by a database. The `query` should
// use the `?` bindVar. The return value uses the `?` bindVar.
func In(query string, args ...interface{}) (string, []interface{}, error) {
// argMeta stores reflect.Value and length for slices and
// the value itself for non-slice arguments
type argMeta struct {
v reflect.Value
i interface{}
length int
}
var flatArgsCount int
var anySlices bool
meta := make([]argMeta, len(args))
for i, arg := range args {
if a, ok := arg.(driver.Valuer); ok {
arg, _ = a.Value()
}
v := reflect.ValueOf(arg)
t := reflectx.Deref(v.Type())
// []byte is a driver.Value type so it should not be expanded
if t.Kind() == reflect.Slice && t != reflect.TypeOf([]byte{}) {
meta[i].length = v.Len()
meta[i].v = v
anySlices = true
flatArgsCount += meta[i].length
if meta[i].length == 0 {
return "", nil, errors.New("empty slice passed to 'in' query")
}
} else {
meta[i].i = arg
flatArgsCount++
}
}
// don't do any parsing if there aren't any slices; note that this means
// some errors that we might have caught below will not be returned.
if !anySlices {
return query, args, nil
}
newArgs := make([]interface{}, 0, flatArgsCount)
buf := make([]byte, 0, len(query)+len(", ?")*flatArgsCount)
var arg, offset int
for i := strings.IndexByte(query[offset:], '?'); i != -1; i = strings.IndexByte(query[offset:], '?') {
if arg >= len(meta) {
// if an argument wasn't passed, let's return an error; this is
// not actually how database/sql Exec/Query works, but since we are
// creating an argument list programmatically, we want to be able
// to catch these programmer errors earlier.
return "", nil, errors.New("number of bindVars exceeds arguments")
}
argMeta := meta[arg]
arg++
// not a slice, continue.
// our question mark will either be written before the next expansion
// of a slice or after the loop when writing the rest of the query
if argMeta.length == 0 {
offset = offset + i + 1
newArgs = append(newArgs, argMeta.i)
continue
}
// write everything up to and including our ? character
buf = append(buf, query[:offset+i+1]...)
for si := 1; si < argMeta.length; si++ {
buf = append(buf, ", ?"...)
}
newArgs = appendReflectSlice(newArgs, argMeta.v, argMeta.length)
// slice the query and reset the offset. this avoids some bookkeeping for
// the write after the loop
query = query[offset+i+1:]
offset = 0
}
buf = append(buf, query...)
if arg < len(meta) {
return "", nil, errors.New("number of bindVars less than number arguments")
}
return string(buf), newArgs, nil
}
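A sketch of the usual In + Rebind pattern (db is an assumed *sqlx.DB; the table and values are illustrative):
query, args, err := sqlx.In("SELECT * FROM place WHERE telcode IN (?)", []int{852, 65})
if err != nil {
log.Fatalln(err)
}
// query == "SELECT * FROM place WHERE telcode IN (?, ?)", args == []interface{}{852, 65}
rows, err := db.Queryx(db.Rebind(query), args...) // rebind ? to the driver's bindvar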
func appendReflectSlice(args []interface{}, v reflect.Value, vlen int) []interface{} {
switch val := v.Interface().(type) {
case []interface{}:
args = append(args, val...)
case []int:
for i := range val {
args = append(args, val[i])
}
case []string:
for i := range val {
args = append(args, val[i])
}
default:
for si := 0; si < vlen; si++ {
args = append(args, v.Index(si).Interface())
}
}
return args
}

View File

@ -1,12 +0,0 @@
// Package sqlx provides general purpose extensions to database/sql.
//
// It is intended to seamlessly wrap database/sql and provide convenience
// methods which are useful in the development of database driven applications.
// None of the underlying database/sql methods are changed. Instead all extended
// behavior is implemented through new methods defined on wrapper types.
//
// Additions include scanning into structs, named query support, rebinding
// queries for different drivers, convenient shorthands for common error handling
// and more.
//
package sqlx

View File

@ -1,7 +0,0 @@
module github.com/jmoiron/sqlx
require (
github.com/go-sql-driver/mysql v1.4.0
github.com/lib/pq v1.0.0
github.com/mattn/go-sqlite3 v1.9.0
)

View File

@ -1,6 +0,0 @@
github.com/go-sql-driver/mysql v1.4.0 h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/mattn/go-sqlite3 v1.9.0 h1:pDRiWfl+++eC2FEFRy6jXmQlvp4Yh3z1MJKg4UeYM/4=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=

View File

@ -1,356 +0,0 @@
package sqlx
// Named Query Support
//
// * BindMap - bind query bindvars to map/struct args
// * NamedExec, NamedQuery - named query w/ struct or map
// * NamedStmt - a pre-compiled named query which is a prepared statement
//
// Internal Interfaces:
//
// * compileNamedQuery - rebind a named query, returning a query and list of names
// * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist
//
import (
"database/sql"
"errors"
"fmt"
"reflect"
"strconv"
"unicode"
"github.com/jmoiron/sqlx/reflectx"
)
// NamedStmt is a prepared statement that executes named queries. Prepare it
// how you would execute a NamedQuery, but pass in a struct or map when executing.
type NamedStmt struct {
Params []string
QueryString string
Stmt *Stmt
}
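A minimal sketch of obtaining and using a NamedStmt via DB.PrepareNamed (defined elsewhere in this package); db is an assumed *sqlx.DB and Person an assumed struct with db tags:
stmt, err := db.PrepareNamed("SELECT * FROM person WHERE first_name = :fn")
if err != nil {
log.Fatalln(err)
}
defer stmt.Close()
var p Person
err = stmt.Get(&p, map[string]interface{}{"fn": "Jason"})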
// Close closes the named statement.
func (n *NamedStmt) Close() error {
return n.Stmt.Close()
}
// Exec executes a named statement using the struct passed.
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) {
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
if err != nil {
return *new(sql.Result), err
}
return n.Stmt.Exec(args...)
}
// Query executes a named statement using the struct argument, returning rows.
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) {
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
if err != nil {
return nil, err
}
return n.Stmt.Query(args...)
}
// QueryRow executes a named statement against the database. Because sqlx cannot
// create a *sql.Row with an error condition pre-set for binding errors, sqlx
// returns a *sqlx.Row instead.
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) QueryRow(arg interface{}) *Row {
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
if err != nil {
return &Row{err: err}
}
return n.Stmt.QueryRowx(args...)
}
// MustExec execs a NamedStmt, panicking on error
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) MustExec(arg interface{}) sql.Result {
res, err := n.Exec(arg)
if err != nil {
panic(err)
}
return res
}
// Queryx using this NamedStmt
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) {
r, err := n.Query(arg)
if err != nil {
return nil, err
}
return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
}
// QueryRowx this NamedStmt. Because of limitations with QueryRow, this is
// an alias for QueryRow.
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) QueryRowx(arg interface{}) *Row {
return n.QueryRow(arg)
}
// Select using this NamedStmt
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) Select(dest interface{}, arg interface{}) error {
rows, err := n.Queryx(arg)
if err != nil {
return err
}
// if something happens here, we want to make sure the rows are Closed
defer rows.Close()
return scanAll(rows, dest, false)
}
// Get using this NamedStmt
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) Get(dest interface{}, arg interface{}) error {
r := n.QueryRowx(arg)
return r.scanAny(dest, false)
}
// Unsafe creates an unsafe version of the NamedStmt
func (n *NamedStmt) Unsafe() *NamedStmt {
r := &NamedStmt{Params: n.Params, Stmt: n.Stmt, QueryString: n.QueryString}
r.Stmt.unsafe = true
return r
}
// A union interface of preparer and binder, required to be able to prepare
// named statements (as the bindtype must be determined).
type namedPreparer interface {
Preparer
binder
}
func prepareNamed(p namedPreparer, query string) (*NamedStmt, error) {
bindType := BindType(p.DriverName())
q, args, err := compileNamedQuery([]byte(query), bindType)
if err != nil {
return nil, err
}
stmt, err := Preparex(p, q)
if err != nil {
return nil, err
}
return &NamedStmt{
QueryString: q,
Params: args,
Stmt: stmt,
}, nil
}
func bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
if maparg, ok := arg.(map[string]interface{}); ok {
return bindMapArgs(names, maparg)
}
return bindArgs(names, arg, m)
}
// private interface to generate a list of interfaces from a given struct
// type, given a list of names to pull out of the struct. Used by public
// BindStruct interface.
func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
arglist := make([]interface{}, 0, len(names))
// grab the indirected value of arg
v := reflect.ValueOf(arg)
for v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; {
v = v.Elem()
}
err := m.TraversalsByNameFunc(v.Type(), names, func(i int, t []int) error {
if len(t) == 0 {
return fmt.Errorf("could not find name %s in %#v", names[i], arg)
}
val := reflectx.FieldByIndexesReadOnly(v, t)
arglist = append(arglist, val.Interface())
return nil
})
return arglist, err
}
// like bindArgs, but for maps.
func bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) {
arglist := make([]interface{}, 0, len(names))
for _, name := range names {
val, ok := arg[name]
if !ok {
return arglist, fmt.Errorf("could not find name %s in %#v", name, arg)
}
arglist = append(arglist, val)
}
return arglist, nil
}
// bindStruct binds a named parameter query with fields from a struct argument.
// The rules for binding field names to parameter names follow the same
// conventions as for StructScan, including obeying the `db` struct tags.
func bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
bound, names, err := compileNamedQuery([]byte(query), bindType)
if err != nil {
return "", []interface{}{}, err
}
arglist, err := bindArgs(names, arg, m)
if err != nil {
return "", []interface{}{}, err
}
return bound, arglist, nil
}
// bindMap binds a named parameter query with a map of arguments.
func bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) {
bound, names, err := compileNamedQuery([]byte(query), bindType)
if err != nil {
return "", []interface{}{}, err
}
arglist, err := bindMapArgs(names, args)
return bound, arglist, err
}
// -- Compilation of Named Queries
// Allow digits and letters in bind params; additionally runes are
// checked against underscores, meaning that bind params can be
// alphanumeric with underscores. Mind the difference between unicode
// digits and numbers, where '5' is a digit but '五' is not.
var allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit}
// FIXME: this function isn't safe for unicode named params, as a failing test
// can testify. This is not a regression but a failure of the original code
// as well. It should be modified to range over runes in a string rather than
// bytes, even though this is less convenient and slower. Hopefully the
// addition of the prepared NamedStmt (which will only do this once) will make
// up for the slightly slower ad-hoc NamedExec/NamedQuery.
// compile a NamedQuery into an unbound query (using the '?' bindvar) and
// a list of names.
func compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) {
names = make([]string, 0, 10)
rebound := make([]byte, 0, len(qs))
inName := false
last := len(qs) - 1
currentVar := 1
name := make([]byte, 0, 10)
for i, b := range qs {
// a ':' while we're in a name is an error
if b == ':' {
// if this is the second ':' in a '::' escape sequence, append a ':'
if inName && i > 0 && qs[i-1] == ':' {
rebound = append(rebound, ':')
inName = false
continue
} else if inName {
err = errors.New("unexpected `:` while reading named param at " + strconv.Itoa(i))
return query, names, err
}
inName = true
name = []byte{}
} else if inName && i > 0 && b == '=' {
rebound = append(rebound, ':', '=')
inName = false
continue
// if we're in a name, and this is an allowed character, continue
} else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_' || b == '.') && i != last {
// append the byte to the name if we are in a name and not on the last byte
name = append(name, b)
// if we're in a name and it's not an allowed character, the name is done
} else if inName {
inName = false
// if this is the final byte of the string and it is part of the name, then
// make sure to add it to the name
if i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) {
name = append(name, b)
}
// add the string representation to the names list
names = append(names, string(name))
// add a proper bindvar for the bindType
switch bindType {
// oracle only supports named type bind vars even for positional
case NAMED:
rebound = append(rebound, ':')
rebound = append(rebound, name...)
case QUESTION, UNKNOWN:
rebound = append(rebound, '?')
case DOLLAR:
rebound = append(rebound, '$')
for _, b := range strconv.Itoa(currentVar) {
rebound = append(rebound, byte(b))
}
currentVar++
case AT:
rebound = append(rebound, '@', 'p')
for _, b := range strconv.Itoa(currentVar) {
rebound = append(rebound, byte(b))
}
currentVar++
}
// add this byte to the rebound query unless it was consumed as part of the name
if i != last {
rebound = append(rebound, b)
} else if !unicode.IsOneOf(allowedBindRunes, rune(b)) {
rebound = append(rebound, b)
}
} else {
// this is a normal byte and should just go onto the rebound query
rebound = append(rebound, b)
}
}
return string(rebound), names, err
}
// BindNamed binds a struct or a map to a query with named parameters.
// DEPRECATED: use `sqlx.Named` instead of this; it may be removed in the future.
func BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) {
return bindNamedMapper(bindType, query, arg, mapper())
}
// Named takes a query using named parameters and an argument and
// returns a new query with a list of args that can be executed by
// a database. The return value uses the `?` bindvar.
func Named(query string, arg interface{}) (string, []interface{}, error) {
return bindNamedMapper(QUESTION, query, arg, mapper())
}
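A minimal sketch of Named (per the doc comment, the returned query uses the `?` bindvar):
q, args, err := sqlx.Named("SELECT * FROM person WHERE first_name = :first", map[string]interface{}{"first": "Bin"})
// q == "SELECT * FROM person WHERE first_name = ?"
// args == []interface{}{"Bin"}, err == nil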
func bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
if maparg, ok := arg.(map[string]interface{}); ok {
return bindMap(bindType, query, maparg)
}
return bindStruct(bindType, query, arg, m)
}
// NamedQuery binds a named query and then runs Query on the result using the
// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with
// map[string]interface{} types.
func NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) {
q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
if err != nil {
return nil, err
}
return e.Queryx(q, args...)
}
// NamedExec uses BindStruct to get a query executable by the driver and
// then runs Exec on the result. Returns an error from the binding
// or the query execution itself.
func NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) {
q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
if err != nil {
return nil, err
}
return e.Exec(q, args...)
}

View File

@ -1,132 +0,0 @@
// +build go1.8
package sqlx
import (
"context"
"database/sql"
)
// A union interface of PreparerContext and binder, required to be able to
// prepare named statements with context (as the bindtype must be determined).
type namedPreparerContext interface {
PreparerContext
binder
}
func prepareNamedContext(ctx context.Context, p namedPreparerContext, query string) (*NamedStmt, error) {
bindType := BindType(p.DriverName())
q, args, err := compileNamedQuery([]byte(query), bindType)
if err != nil {
return nil, err
}
stmt, err := PreparexContext(ctx, p, q)
if err != nil {
return nil, err
}
return &NamedStmt{
QueryString: q,
Params: args,
Stmt: stmt,
}, nil
}
// ExecContext executes a named statement using the struct passed.
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) ExecContext(ctx context.Context, arg interface{}) (sql.Result, error) {
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
if err != nil {
return *new(sql.Result), err
}
return n.Stmt.ExecContext(ctx, args...)
}
// QueryContext executes a named statement using the struct argument, returning rows.
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) QueryContext(ctx context.Context, arg interface{}) (*sql.Rows, error) {
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
if err != nil {
return nil, err
}
return n.Stmt.QueryContext(ctx, args...)
}
// QueryRowContext executes a named statement against the database. Because sqlx cannot
// create a *sql.Row with an error condition pre-set for binding errors, sqlx
// returns a *sqlx.Row instead.
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) QueryRowContext(ctx context.Context, arg interface{}) *Row {
args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
if err != nil {
return &Row{err: err}
}
return n.Stmt.QueryRowxContext(ctx, args...)
}
// MustExecContext execs a NamedStmt, panicking on error
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) MustExecContext(ctx context.Context, arg interface{}) sql.Result {
res, err := n.ExecContext(ctx, arg)
if err != nil {
panic(err)
}
return res
}
// QueryxContext using this NamedStmt
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) QueryxContext(ctx context.Context, arg interface{}) (*Rows, error) {
r, err := n.QueryContext(ctx, arg)
if err != nil {
return nil, err
}
return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
}
// QueryRowxContext this NamedStmt. Because of limitations with QueryRow, this is
// an alias for QueryRow.
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) QueryRowxContext(ctx context.Context, arg interface{}) *Row {
return n.QueryRowContext(ctx, arg)
}
// SelectContext using this NamedStmt
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) SelectContext(ctx context.Context, dest interface{}, arg interface{}) error {
rows, err := n.QueryxContext(ctx, arg)
if err != nil {
return err
}
// if something happens here, we want to make sure the rows are Closed
defer rows.Close()
return scanAll(rows, dest, false)
}
// GetContext using this NamedStmt
// Any named placeholder parameters are replaced with fields from arg.
func (n *NamedStmt) GetContext(ctx context.Context, dest interface{}, arg interface{}) error {
r := n.QueryRowxContext(ctx, arg)
return r.scanAny(dest, false)
}
// NamedQueryContext binds a named query and then runs Query on the result using the
// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with
// map[string]interface{} types.
func NamedQueryContext(ctx context.Context, e ExtContext, query string, arg interface{}) (*Rows, error) {
q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
if err != nil {
return nil, err
}
return e.QueryxContext(ctx, q, args...)
}
// NamedExecContext uses BindStruct to get a query executable by the driver and
// then runs Exec on the result. Returns an error from the binding
// or the query execution itself.
func NamedExecContext(ctx context.Context, e ExtContext, query string, arg interface{}) (sql.Result, error) {
q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
if err != nil {
return nil, err
}
return e.ExecContext(ctx, q, args...)
}

View File

@ -1,17 +0,0 @@
# reflectx
The sqlx package has special reflect needs. In particular, it needs to:
* be able to map a name to a field
* understand embedded structs
* understand mapping names to fields by a particular tag
* support user-specified name -> field mapping functions
These behaviors mimic the behaviors of the standard library marshallers and also the
behavior of standard Go accessors.
The first two are amply taken care of by `reflect.Value.FieldByName`, and the third is
addressed by `reflect.Value.FieldByNameFunc`, but these don't quite understand struct
tags in the ways that are vital to most marshallers, and they are slow.
This reflectx package extends reflect to achieve these goals.

View File

@ -1,441 +0,0 @@
// Package reflectx implements extensions to the standard reflect lib suitable
// for implementing marshalling and unmarshalling packages. The main Mapper type
// allows for Go-compatible named attribute access, including accessing embedded
// struct attributes and the ability to use functions and struct tags to
// customize field names.
//
package reflectx
import (
"reflect"
"runtime"
"strings"
"sync"
)
// A FieldInfo is metadata for a struct field.
type FieldInfo struct {
Index []int
Path string
Field reflect.StructField
Zero reflect.Value
Name string
Options map[string]string
Embedded bool
Children []*FieldInfo
Parent *FieldInfo
}
// A StructMap is an index of field metadata for a struct.
type StructMap struct {
Tree *FieldInfo
Index []*FieldInfo
Paths map[string]*FieldInfo
Names map[string]*FieldInfo
}
// GetByPath returns a *FieldInfo for a given string path.
func (f StructMap) GetByPath(path string) *FieldInfo {
return f.Paths[path]
}
// GetByTraversal returns a *FieldInfo for a given integer path. It is
// analogous to reflect.FieldByIndex, but using the cached traversal
// rather than re-executing the reflect machinery each time.
func (f StructMap) GetByTraversal(index []int) *FieldInfo {
if len(index) == 0 {
return nil
}
tree := f.Tree
for _, i := range index {
if i >= len(tree.Children) || tree.Children[i] == nil {
return nil
}
tree = tree.Children[i]
}
return tree
}
// Mapper is a general purpose mapper of names to struct fields. A Mapper
// behaves like most marshallers in the standard library, obeying a field tag
// for name mapping but also providing a basic transform function.
type Mapper struct {
cache map[reflect.Type]*StructMap
tagName string
tagMapFunc func(string) string
mapFunc func(string) string
mutex sync.Mutex
}
// NewMapper returns a new mapper using the tagName as its struct field tag.
// If tagName is the empty string, it is ignored.
func NewMapper(tagName string) *Mapper {
return &Mapper{
cache: make(map[reflect.Type]*StructMap),
tagName: tagName,
}
}
// NewMapperTagFunc returns a new mapper which contains a mapper for field names
// AND a mapper for tag values. This is useful for tags like json which can
// have values like "name,omitempty".
func NewMapperTagFunc(tagName string, mapFunc, tagMapFunc func(string) string) *Mapper {
return &Mapper{
cache: make(map[reflect.Type]*StructMap),
tagName: tagName,
mapFunc: mapFunc,
tagMapFunc: tagMapFunc,
}
}
// NewMapperFunc returns a new mapper which optionally obeys a field tag and
// a struct field name mapper func given by f. Tags will take precedence, but
// for any other field, the mapped name will be f(field.Name)
func NewMapperFunc(tagName string, f func(string) string) *Mapper {
return &Mapper{
cache: make(map[reflect.Type]*StructMap),
tagName: tagName,
mapFunc: f,
}
}
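A minimal sketch of a Mapper in use (the struct and values are illustrative; FieldByName is defined below):
m := reflectx.NewMapperFunc("db", strings.ToLower)
type Person struct {
FirstName string `db:"first_name"`
Email string
}
v := reflect.ValueOf(Person{FirstName: "Jane", Email: "jane@example.com"})
fmt.Println(m.FieldByName(v, "first_name").Interface()) // Jane
fmt.Println(m.FieldByName(v, "email").Interface()) // jane@example.com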
// TypeMap returns a mapping of field strings to int slices representing
// the traversal down the struct to reach the field.
func (m *Mapper) TypeMap(t reflect.Type) *StructMap {
m.mutex.Lock()
mapping, ok := m.cache[t]
if !ok {
mapping = getMapping(t, m.tagName, m.mapFunc, m.tagMapFunc)
m.cache[t] = mapping
}
m.mutex.Unlock()
return mapping
}
// FieldMap returns the mapper's mapping of field names to reflect values. Panics
// if v's Kind is not Struct, or v is not Indirectable to a struct kind.
func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value {
v = reflect.Indirect(v)
mustBe(v, reflect.Struct)
r := map[string]reflect.Value{}
tm := m.TypeMap(v.Type())
for tagName, fi := range tm.Names {
r[tagName] = FieldByIndexes(v, fi.Index)
}
return r
}
// FieldByName returns a field by its mapped name as a reflect.Value.
// Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind.
// Returns zero Value if the name is not found.
func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value {
v = reflect.Indirect(v)
mustBe(v, reflect.Struct)
tm := m.TypeMap(v.Type())
fi, ok := tm.Names[name]
if !ok {
return v
}
return FieldByIndexes(v, fi.Index)
}
// FieldsByName returns a slice of values corresponding to the slice of names
// for the value. Panics if v's Kind is not Struct or v is not Indirectable
// to a struct Kind. Returns zero Value for each name not found.
func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value {
v = reflect.Indirect(v)
mustBe(v, reflect.Struct)
tm := m.TypeMap(v.Type())
vals := make([]reflect.Value, 0, len(names))
for _, name := range names {
fi, ok := tm.Names[name]
if !ok {
vals = append(vals, *new(reflect.Value))
} else {
vals = append(vals, FieldByIndexes(v, fi.Index))
}
}
return vals
}
// TraversalsByName returns a slice of int slices which represent the struct
// traversals for each mapped name. Panics if t is not a struct or Indirectable
// to a struct. Returns empty int slice for each name not found.
func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int {
r := make([][]int, 0, len(names))
m.TraversalsByNameFunc(t, names, func(_ int, i []int) error {
if i == nil {
r = append(r, []int{})
} else {
r = append(r, i)
}
return nil
})
return r
}
// TraversalsByNameFunc traverses the mapped names and calls fn with the index of
// each name and the struct traversal represented by that name. Panics if t is not
// a struct or Indirectable to a struct. Returns the first error returned by fn or nil.
func (m *Mapper) TraversalsByNameFunc(t reflect.Type, names []string, fn func(int, []int) error) error {
t = Deref(t)
mustBe(t, reflect.Struct)
tm := m.TypeMap(t)
for i, name := range names {
fi, ok := tm.Names[name]
if !ok {
if err := fn(i, nil); err != nil {
return err
}
} else {
if err := fn(i, fi.Index); err != nil {
return err
}
}
}
return nil
}
// FieldByIndexes returns a value for the field given by the struct traversal
// for the given value.
func FieldByIndexes(v reflect.Value, indexes []int) reflect.Value {
for _, i := range indexes {
v = reflect.Indirect(v).Field(i)
// if this is a pointer and it's nil, allocate a new value and set it
if v.Kind() == reflect.Ptr && v.IsNil() {
alloc := reflect.New(Deref(v.Type()))
v.Set(alloc)
}
if v.Kind() == reflect.Map && v.IsNil() {
v.Set(reflect.MakeMap(v.Type()))
}
}
return v
}
// FieldByIndexesReadOnly returns a value for a particular struct traversal,
// but is not concerned with allocating nil pointers because the value is
// going to be used for reading and not setting.
func FieldByIndexesReadOnly(v reflect.Value, indexes []int) reflect.Value {
for _, i := range indexes {
v = reflect.Indirect(v).Field(i)
}
return v
}
// Deref is Indirect for reflect.Types
func Deref(t reflect.Type) reflect.Type {
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
return t
}
// -- helpers & utilities --
type kinder interface {
Kind() reflect.Kind
}
// mustBe checks a value against a kind, panicking with a reflect.ValueError
// if the kind isn't that which is required.
func mustBe(v kinder, expected reflect.Kind) {
if k := v.Kind(); k != expected {
panic(&reflect.ValueError{Method: methodName(), Kind: k})
}
}
// methodName returns the caller of the function calling methodName
func methodName() string {
pc, _, _, _ := runtime.Caller(2)
f := runtime.FuncForPC(pc)
if f == nil {
return "unknown method"
}
return f.Name()
}
type typeQueue struct {
t reflect.Type
fi *FieldInfo
pp string // Parent path
}
// A copying append that creates a new slice each time.
func apnd(is []int, i int) []int {
x := make([]int, len(is)+1)
for p, n := range is {
x[p] = n
}
x[len(x)-1] = i
return x
}
type mapf func(string) string
// parseName parses the tag and the target name for the given field using
// the tagName (eg 'json' for `json:"foo"` tags), mapFunc for mapping the
// field's name to a target name, and tagMapFunc for mapping the tag to
// a target name.
func parseName(field reflect.StructField, tagName string, mapFunc, tagMapFunc mapf) (tag, fieldName string) {
// first, set the fieldName to the field's name
fieldName = field.Name
// if a mapFunc is set, use that to override the fieldName
if mapFunc != nil {
fieldName = mapFunc(fieldName)
}
// if there's no tag to look for, return the field name
if tagName == "" {
return "", fieldName
}
// if this tag is not set using the normal convention in the tag,
// then return the fieldName. This check is done because, according
// to the reflect documentation:
// If the tag does not have the conventional format,
// the value returned by Get is unspecified.
// which doesn't sound great.
if !strings.Contains(string(field.Tag), tagName+":") {
return "", fieldName
}
// at this point we're fairly sure that we have a tag, so let's pull it out
tag = field.Tag.Get(tagName)
// if we have a mapper function, call it on the whole tag
// XXX: this is a change from the old version, which pulled out the name
// before the tagMapFunc could be run, but I think this is the right way
if tagMapFunc != nil {
tag = tagMapFunc(tag)
}
// finally, split the options from the name
parts := strings.Split(tag, ",")
fieldName = parts[0]
return tag, fieldName
}
// parseOptions parses options out of a tag string, skipping the name
func parseOptions(tag string) map[string]string {
parts := strings.Split(tag, ",")
options := make(map[string]string, len(parts))
if len(parts) > 1 {
for _, opt := range parts[1:] {
// short circuit potentially expensive split op
if strings.Contains(opt, "=") {
kv := strings.Split(opt, "=")
options[kv[0]] = kv[1]
continue
}
options[opt] = ""
}
}
return options
}
// getMapping returns a mapping for the t type, using the tagName, mapFunc and
// tagMapFunc to determine the canonical names of fields.
func getMapping(t reflect.Type, tagName string, mapFunc, tagMapFunc mapf) *StructMap {
m := []*FieldInfo{}
root := &FieldInfo{}
queue := []typeQueue{}
queue = append(queue, typeQueue{Deref(t), root, ""})
QueueLoop:
for len(queue) != 0 {
// pop the first item off of the queue
tq := queue[0]
queue = queue[1:]
// ignore recursive field
for p := tq.fi.Parent; p != nil; p = p.Parent {
if tq.fi.Field.Type == p.Field.Type {
continue QueueLoop
}
}
nChildren := 0
if tq.t.Kind() == reflect.Struct {
nChildren = tq.t.NumField()
}
tq.fi.Children = make([]*FieldInfo, nChildren)
// iterate through all of its fields
for fieldPos := 0; fieldPos < nChildren; fieldPos++ {
f := tq.t.Field(fieldPos)
// parse the tag and the target name using the mapping options for this field
tag, name := parseName(f, tagName, mapFunc, tagMapFunc)
// if the name is "-", disabled via a tag, skip it
if name == "-" {
continue
}
fi := FieldInfo{
Field: f,
Name: name,
Zero: reflect.New(f.Type).Elem(),
Options: parseOptions(tag),
}
// if the path is empty this path is just the name
if tq.pp == "" {
fi.Path = fi.Name
} else {
fi.Path = tq.pp + "." + fi.Name
}
// skip unexported fields
if len(f.PkgPath) != 0 && !f.Anonymous {
continue
}
// bfs search of anonymous embedded structs
if f.Anonymous {
pp := tq.pp
if tag != "" {
pp = fi.Path
}
fi.Embedded = true
fi.Index = apnd(tq.fi.Index, fieldPos)
nChildren := 0
ft := Deref(f.Type)
if ft.Kind() == reflect.Struct {
nChildren = ft.NumField()
}
fi.Children = make([]*FieldInfo, nChildren)
queue = append(queue, typeQueue{Deref(f.Type), &fi, pp})
} else if fi.Zero.Kind() == reflect.Struct || (fi.Zero.Kind() == reflect.Ptr && fi.Zero.Type().Elem().Kind() == reflect.Struct) {
fi.Index = apnd(tq.fi.Index, fieldPos)
fi.Children = make([]*FieldInfo, Deref(f.Type).NumField())
queue = append(queue, typeQueue{Deref(f.Type), &fi, fi.Path})
}
fi.Index = apnd(tq.fi.Index, fieldPos)
fi.Parent = tq.fi
tq.fi.Children[fieldPos] = &fi
m = append(m, &fi)
}
}
flds := &StructMap{Index: m, Tree: root, Paths: map[string]*FieldInfo{}, Names: map[string]*FieldInfo{}}
for _, fi := range flds.Index {
flds.Paths[fi.Path] = fi
if fi.Name != "" && !fi.Embedded {
flds.Names[fi.Path] = fi
}
}
return flds
}

1045
vendor/github.com/jmoiron/sqlx/sqlx.go generated vendored

File diff suppressed because it is too large

View File

@ -1,346 +0,0 @@
// +build go1.8
package sqlx
import (
"context"
"database/sql"
"fmt"
"io/ioutil"
"path/filepath"
"reflect"
)
// ConnectContext to a database and verify with a ping.
func ConnectContext(ctx context.Context, driverName, dataSourceName string) (*DB, error) {
db, err := Open(driverName, dataSourceName)
if err != nil {
return db, err
}
err = db.PingContext(ctx)
return db, err
}
// QueryerContext is an interface used by GetContext and SelectContext
type QueryerContext interface {
QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error)
QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row
}
// PreparerContext is an interface used by PreparexContext.
type PreparerContext interface {
PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
}
// ExecerContext is an interface used by MustExecContext and LoadFileContext
type ExecerContext interface {
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
}
// ExtContext is a union interface which can bind, query, and exec, with Context
// used by NamedQueryContext and NamedExecContext.
type ExtContext interface {
binder
QueryerContext
ExecerContext
}
// SelectContext executes a query using the provided Queryer, and StructScans
// each row into dest, which must be a slice. If the slice elements are
// scannable, then the result set must have only one column. Otherwise,
// StructScan is used. The *sql.Rows are closed automatically.
// Any placeholder parameters are replaced with supplied args.
func SelectContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error {
rows, err := q.QueryxContext(ctx, query, args...)
if err != nil {
return err
}
// if something happens here, we want to make sure the rows are Closed
defer rows.Close()
return scanAll(rows, dest, false)
}
// PreparexContext prepares a statement.
//
// The provided context is used for the preparation of the statement, not for
// the execution of the statement.
func PreparexContext(ctx context.Context, p PreparerContext, query string) (*Stmt, error) {
s, err := p.PrepareContext(ctx, query)
if err != nil {
return nil, err
}
return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err
}
// GetContext does a QueryRow using the provided Queryer, and scans the
// resulting row to dest. If dest is scannable, the result must only have one
// column. Otherwise, StructScan is used. Get will return sql.ErrNoRows like
// row.Scan would. Any placeholder parameters are replaced with supplied args.
// An error is returned if the result set is empty.
func GetContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error {
r := q.QueryRowxContext(ctx, query, args...)
return r.scanAny(dest, false)
}
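A minimal sketch of SelectContext and GetContext in use (db is an assumed *sqlx.DB on a postgres driver; Person is an assumed tagged struct):
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
var people []Person
if err := db.SelectContext(ctx, &people, "SELECT * FROM person ORDER BY first_name ASC"); err != nil {
log.Fatalln(err)
}
var jason Person
if err := db.GetContext(ctx, &jason, "SELECT * FROM person WHERE first_name = $1", "Jason"); err != nil {
log.Fatalln(err)
}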
// LoadFileContext exec's every statement in a file (as a single call to Exec).
// LoadFileContext may return a nil *sql.Result if errors are encountered
// locating or reading the file at path. LoadFile reads the entire file into
// memory, so it is not suitable for loading large data dumps, but can be useful
// for initializing schemas or loading indexes.
//
// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3
// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting
// this by requiring something with DriverName() and then attempting to split the
// queries will be difficult to get right, and its current driver-specific behavior
// is deemed at least not complex in its incorrectness.
func LoadFileContext(ctx context.Context, e ExecerContext, path string) (*sql.Result, error) {
realpath, err := filepath.Abs(path)
if err != nil {
return nil, err
}
contents, err := ioutil.ReadFile(realpath)
if err != nil {
return nil, err
}
res, err := e.ExecContext(ctx, string(contents))
return &res, err
}
// MustExecContext execs the query using e and panics if there was an error.
// Any placeholder parameters are replaced with supplied args.
func MustExecContext(ctx context.Context, e ExecerContext, query string, args ...interface{}) sql.Result {
res, err := e.ExecContext(ctx, query, args...)
if err != nil {
panic(err)
}
return res
}
// PrepareNamedContext returns an sqlx.NamedStmt
func (db *DB) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) {
return prepareNamedContext(ctx, db, query)
}
// NamedQueryContext using this DB.
// Any named placeholder parameters are replaced with fields from arg.
func (db *DB) NamedQueryContext(ctx context.Context, query string, arg interface{}) (*Rows, error) {
return NamedQueryContext(ctx, db, query, arg)
}
// NamedExecContext using this DB.
// Any named placeholder parameters are replaced with fields from arg.
func (db *DB) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) {
return NamedExecContext(ctx, db, query, arg)
}
// SelectContext using this DB.
// Any placeholder parameters are replaced with supplied args.
func (db *DB) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
return SelectContext(ctx, db, dest, query, args...)
}
// GetContext using this DB.
// Any placeholder parameters are replaced with supplied args.
// An error is returned if the result set is empty.
func (db *DB) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
return GetContext(ctx, db, dest, query, args...)
}
// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt.
//
// The provided context is used for the preparation of the statement, not for
// the execution of the statement.
func (db *DB) PreparexContext(ctx context.Context, query string) (*Stmt, error) {
return PreparexContext(ctx, db, query)
}
// QueryxContext queries the database and returns an *sqlx.Rows.
// Any placeholder parameters are replaced with supplied args.
func (db *DB) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
r, err := db.DB.QueryContext(ctx, query, args...)
if err != nil {
return nil, err
}
return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err
}
// QueryRowxContext queries the database and returns an *sqlx.Row.
// Any placeholder parameters are replaced with supplied args.
func (db *DB) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
rows, err := db.DB.QueryContext(ctx, query, args...)
return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper}
}
// MustBeginTx starts a transaction, and panics on error. Returns an *sqlx.Tx instead
// of an *sql.Tx.
//
// The provided context is used until the transaction is committed or rolled
// back. If the context is canceled, the sql package will roll back the
// transaction. Tx.Commit will return an error if the context provided to
// MustBeginTx is canceled.
func (db *DB) MustBeginTx(ctx context.Context, opts *sql.TxOptions) *Tx {
tx, err := db.BeginTxx(ctx, opts)
if err != nil {
panic(err)
}
return tx
}
// MustExecContext (panic) runs MustExec using this database.
// Any placeholder parameters are replaced with supplied args.
func (db *DB) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result {
return MustExecContext(ctx, db, query, args...)
}
// BeginTxx begins a transaction and returns an *sqlx.Tx instead of an
// *sql.Tx.
//
// The provided context is used until the transaction is committed or rolled
// back. If the context is canceled, the sql package will roll back the
// transaction. Tx.Commit will return an error if the context provided to
// BeginTxx is canceled.
func (db *DB) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) {
tx, err := db.DB.BeginTx(ctx, opts)
if err != nil {
return nil, err
}
return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err
}
// StmtxContext returns a version of the prepared statement which runs within a
// transaction. Provided stmt can be either *sql.Stmt or *sqlx.Stmt.
func (tx *Tx) StmtxContext(ctx context.Context, stmt interface{}) *Stmt {
var s *sql.Stmt
switch v := stmt.(type) {
case Stmt:
s = v.Stmt
case *Stmt:
s = v.Stmt
case *sql.Stmt:
s = v
default:
panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type()))
}
return &Stmt{Stmt: tx.StmtContext(ctx, s), Mapper: tx.Mapper}
}
// NamedStmtContext returns a version of the prepared statement which runs
// within a transaction.
func (tx *Tx) NamedStmtContext(ctx context.Context, stmt *NamedStmt) *NamedStmt {
return &NamedStmt{
QueryString: stmt.QueryString,
Params: stmt.Params,
Stmt: tx.StmtxContext(ctx, stmt.Stmt),
}
}
// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt.
//
// The provided context is used for the preparation of the statement, not for
// the execution of the statement.
func (tx *Tx) PreparexContext(ctx context.Context, query string) (*Stmt, error) {
return PreparexContext(ctx, tx, query)
}
// PrepareNamedContext returns an sqlx.NamedStmt
func (tx *Tx) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) {
return prepareNamedContext(ctx, tx, query)
}
// MustExecContext runs MustExecContext within a transaction.
// Any placeholder parameters are replaced with supplied args.
func (tx *Tx) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result {
return MustExecContext(ctx, tx, query, args...)
}
// QueryxContext within a transaction and context.
// Any placeholder parameters are replaced with supplied args.
func (tx *Tx) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
r, err := tx.Tx.QueryContext(ctx, query, args...)
if err != nil {
return nil, err
}
return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err
}
// SelectContext within a transaction and context.
// Any placeholder parameters are replaced with supplied args.
func (tx *Tx) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
return SelectContext(ctx, tx, dest, query, args...)
}
// GetContext within a transaction and context.
// Any placeholder parameters are replaced with supplied args.
// An error is returned if the result set is empty.
func (tx *Tx) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
return GetContext(ctx, tx, dest, query, args...)
}
// QueryRowxContext within a transaction and context.
// Any placeholder parameters are replaced with supplied args.
func (tx *Tx) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
rows, err := tx.Tx.QueryContext(ctx, query, args...)
return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper}
}
// NamedExecContext using this Tx.
// Any named placeholder parameters are replaced with fields from arg.
func (tx *Tx) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) {
return NamedExecContext(ctx, tx, query, arg)
}
// SelectContext using the prepared statement.
// Any placeholder parameters are replaced with supplied args.
func (s *Stmt) SelectContext(ctx context.Context, dest interface{}, args ...interface{}) error {
return SelectContext(ctx, &qStmt{s}, dest, "", args...)
}
// GetContext using the prepared statement.
// Any placeholder parameters are replaced with supplied args.
// An error is returned if the result set is empty.
func (s *Stmt) GetContext(ctx context.Context, dest interface{}, args ...interface{}) error {
return GetContext(ctx, &qStmt{s}, dest, "", args...)
}
// MustExecContext (panic) using this statement. Note that the query portion of
// the error output will be blank, as Stmt does not expose its query.
// Any placeholder parameters are replaced with supplied args.
func (s *Stmt) MustExecContext(ctx context.Context, args ...interface{}) sql.Result {
return MustExecContext(ctx, &qStmt{s}, "", args...)
}
// QueryRowxContext using this statement.
// Any placeholder parameters are replaced with supplied args.
func (s *Stmt) QueryRowxContext(ctx context.Context, args ...interface{}) *Row {
qs := &qStmt{s}
return qs.QueryRowxContext(ctx, "", args...)
}
// QueryxContext using this statement.
// Any placeholder parameters are replaced with supplied args.
func (s *Stmt) QueryxContext(ctx context.Context, args ...interface{}) (*Rows, error) {
qs := &qStmt{s}
return qs.QueryxContext(ctx, "", args...)
}
func (q *qStmt) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
return q.Stmt.QueryContext(ctx, args...)
}
func (q *qStmt) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
r, err := q.Stmt.QueryContext(ctx, args...)
if err != nil {
return nil, err
}
return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err
}
func (q *qStmt) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
rows, err := q.Stmt.QueryContext(ctx, args...)
return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}
}
func (q *qStmt) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
return q.Stmt.ExecContext(ctx, args...)
}
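
For orientation, a minimal sketch (not part of the vendored file) showing how these context-aware helpers combine in practice; the people table, the Person struct, and the Postgres-style placeholders are hypothetical:

package main

import (
	"context"

	"github.com/jmoiron/sqlx"
)

// Person maps the hypothetical people table.
type Person struct {
	ID   int    `db:"id"`
	Name string `db:"name"`
}

// updateName renames one row inside a transaction, exercising the
// context-aware helpers defined above.
func updateName(ctx context.Context, db *sqlx.DB) error {
	tx, err := db.BeginTxx(ctx, nil)
	if err != nil {
		return err
	}
	defer tx.Rollback() // no-op once Commit has succeeded

	var p Person
	// GetContext scans a single row and errors if the result set is empty.
	if err := tx.GetContext(ctx, &p, "SELECT id, name FROM people WHERE id = $1", 1); err != nil {
		return err
	}
	p.Name = "renamed"
	// NamedExecContext binds :id and :name from the struct fields.
	if _, err := tx.NamedExecContext(ctx, "UPDATE people SET name = :name WHERE id = :id", p); err != nil {
		return err
	}
	return tx.Commit()
}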

24
vendor/github.com/syndtr/goleveldb/LICENSE generated vendored Normal file

@ -0,0 +1,24 @@
Copyright 2012 Suryandaru Triandana <syndtr@gmail.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

349
vendor/github.com/syndtr/goleveldb/leveldb/batch.go generated vendored Normal file

@ -0,0 +1,349 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"encoding/binary"
"fmt"
"io"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/memdb"
"github.com/syndtr/goleveldb/leveldb/storage"
)
// ErrBatchCorrupted records the reason for batch corruption. This error will be
// wrapped with errors.ErrCorrupted.
type ErrBatchCorrupted struct {
Reason string
}
func (e *ErrBatchCorrupted) Error() string {
return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason)
}
func newErrBatchCorrupted(reason string) error {
return errors.NewErrCorrupted(storage.FileDesc{}, &ErrBatchCorrupted{reason})
}
const (
batchHeaderLen = 8 + 4
batchGrowRec = 3000
batchBufioSize = 16
)
// BatchReplay wraps basic batch operations.
type BatchReplay interface {
Put(key, value []byte)
Delete(key []byte)
}
type batchIndex struct {
keyType keyType
keyPos, keyLen int
valuePos, valueLen int
}
func (index batchIndex) k(data []byte) []byte {
return data[index.keyPos : index.keyPos+index.keyLen]
}
func (index batchIndex) v(data []byte) []byte {
if index.valueLen != 0 {
return data[index.valuePos : index.valuePos+index.valueLen]
}
return nil
}
func (index batchIndex) kv(data []byte) (key, value []byte) {
return index.k(data), index.v(data)
}
// Batch is a write batch.
type Batch struct {
data []byte
index []batchIndex
	// internalLen is the total length of all key/value pairs plus 8 bytes of internal-key overhead per record.
internalLen int
}
func (b *Batch) grow(n int) {
o := len(b.data)
if cap(b.data)-o < n {
div := 1
if len(b.index) > batchGrowRec {
div = len(b.index) / batchGrowRec
}
ndata := make([]byte, o, o+n+o/div)
copy(ndata, b.data)
b.data = ndata
}
}
func (b *Batch) appendRec(kt keyType, key, value []byte) {
n := 1 + binary.MaxVarintLen32 + len(key)
if kt == keyTypeVal {
n += binary.MaxVarintLen32 + len(value)
}
b.grow(n)
index := batchIndex{keyType: kt}
o := len(b.data)
data := b.data[:o+n]
data[o] = byte(kt)
o++
o += binary.PutUvarint(data[o:], uint64(len(key)))
index.keyPos = o
index.keyLen = len(key)
o += copy(data[o:], key)
if kt == keyTypeVal {
o += binary.PutUvarint(data[o:], uint64(len(value)))
index.valuePos = o
index.valueLen = len(value)
o += copy(data[o:], value)
}
b.data = data[:o]
b.index = append(b.index, index)
b.internalLen += index.keyLen + index.valueLen + 8
}
// Put appends 'put operation' of the given key/value pair to the batch.
// It is safe to modify the contents of the argument after Put returns but not
// before.
func (b *Batch) Put(key, value []byte) {
b.appendRec(keyTypeVal, key, value)
}
// Delete appends 'delete operation' of the given key to the batch.
// It is safe to modify the contents of the argument after Delete returns but
// not before.
func (b *Batch) Delete(key []byte) {
b.appendRec(keyTypeDel, key, nil)
}
// Dump dumps the batch contents. The returned slice can be loaded back
// into a batch using the Load method.
// The returned slice is not a copy, so its contents must not be modified.
func (b *Batch) Dump() []byte {
	return b.data
}
// Load loads the given slice into the batch, discarding any previous
// contents. The slice is not copied and is used directly as the batch
// buffer, so it is not safe to modify its contents afterwards.
func (b *Batch) Load(data []byte) error {
	return b.decode(data, -1)
}
// Replay replays batch contents.
func (b *Batch) Replay(r BatchReplay) error {
for _, index := range b.index {
switch index.keyType {
case keyTypeVal:
r.Put(index.k(b.data), index.v(b.data))
case keyTypeDel:
r.Delete(index.k(b.data))
}
}
return nil
}
// Len returns number of records in the batch.
func (b *Batch) Len() int {
return len(b.index)
}
// Reset resets the batch.
func (b *Batch) Reset() {
b.data = b.data[:0]
b.index = b.index[:0]
b.internalLen = 0
}
func (b *Batch) replayInternal(fn func(i int, kt keyType, k, v []byte) error) error {
for i, index := range b.index {
if err := fn(i, index.keyType, index.k(b.data), index.v(b.data)); err != nil {
return err
}
}
return nil
}
func (b *Batch) append(p *Batch) {
ob := len(b.data)
oi := len(b.index)
b.data = append(b.data, p.data...)
b.index = append(b.index, p.index...)
b.internalLen += p.internalLen
// Updating index offset.
if ob != 0 {
for ; oi < len(b.index); oi++ {
index := &b.index[oi]
index.keyPos += ob
if index.valueLen != 0 {
index.valuePos += ob
}
}
}
}
func (b *Batch) decode(data []byte, expectedLen int) error {
b.data = data
b.index = b.index[:0]
b.internalLen = 0
err := decodeBatch(data, func(i int, index batchIndex) error {
b.index = append(b.index, index)
b.internalLen += index.keyLen + index.valueLen + 8
return nil
})
if err != nil {
return err
}
if expectedLen >= 0 && len(b.index) != expectedLen {
return newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", expectedLen, len(b.index)))
}
return nil
}
func (b *Batch) putMem(seq uint64, mdb *memdb.DB) error {
var ik []byte
for i, index := range b.index {
ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType)
if err := mdb.Put(ik, index.v(b.data)); err != nil {
return err
}
}
return nil
}
func (b *Batch) revertMem(seq uint64, mdb *memdb.DB) error {
var ik []byte
for i, index := range b.index {
ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType)
if err := mdb.Delete(ik); err != nil {
return err
}
}
return nil
}
func newBatch() interface{} {
return &Batch{}
}
func decodeBatch(data []byte, fn func(i int, index batchIndex) error) error {
var index batchIndex
for i, o := 0, 0; o < len(data); i++ {
// Key type.
index.keyType = keyType(data[o])
if index.keyType > keyTypeVal {
return newErrBatchCorrupted(fmt.Sprintf("bad record: invalid type %#x", uint(index.keyType)))
}
o++
// Key.
x, n := binary.Uvarint(data[o:])
o += n
if n <= 0 || o+int(x) > len(data) {
return newErrBatchCorrupted("bad record: invalid key length")
}
index.keyPos = o
index.keyLen = int(x)
o += index.keyLen
// Value.
if index.keyType == keyTypeVal {
x, n = binary.Uvarint(data[o:])
o += n
if n <= 0 || o+int(x) > len(data) {
return newErrBatchCorrupted("bad record: invalid value length")
}
index.valuePos = o
index.valueLen = int(x)
o += index.valueLen
} else {
index.valuePos = 0
index.valueLen = 0
}
if err := fn(i, index); err != nil {
return err
}
}
return nil
}
func decodeBatchToMem(data []byte, expectSeq uint64, mdb *memdb.DB) (seq uint64, batchLen int, err error) {
seq, batchLen, err = decodeBatchHeader(data)
if err != nil {
return 0, 0, err
}
if seq < expectSeq {
return 0, 0, newErrBatchCorrupted("invalid sequence number")
}
data = data[batchHeaderLen:]
var ik []byte
var decodedLen int
err = decodeBatch(data, func(i int, index batchIndex) error {
if i >= batchLen {
return newErrBatchCorrupted("invalid records length")
}
ik = makeInternalKey(ik, index.k(data), seq+uint64(i), index.keyType)
if err := mdb.Put(ik, index.v(data)); err != nil {
return err
}
decodedLen++
return nil
})
if err == nil && decodedLen != batchLen {
err = newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", batchLen, decodedLen))
}
return
}
func encodeBatchHeader(dst []byte, seq uint64, batchLen int) []byte {
dst = ensureBuffer(dst, batchHeaderLen)
binary.LittleEndian.PutUint64(dst, seq)
binary.LittleEndian.PutUint32(dst[8:], uint32(batchLen))
return dst
}
func decodeBatchHeader(data []byte) (seq uint64, batchLen int, err error) {
if len(data) < batchHeaderLen {
return 0, 0, newErrBatchCorrupted("too short")
}
seq = binary.LittleEndian.Uint64(data)
batchLen = int(binary.LittleEndian.Uint32(data[8:]))
if batchLen < 0 {
return 0, 0, newErrBatchCorrupted("invalid records length")
}
return
}
func batchesLen(batches []*Batch) int {
batchLen := 0
for _, batch := range batches {
batchLen += batch.Len()
}
return batchLen
}
func writeBatchesWithHeader(wr io.Writer, batches []*Batch, seq uint64) error {
if _, err := wr.Write(encodeBatchHeader(nil, seq, batchesLen(batches))); err != nil {
return err
}
for _, batch := range batches {
if _, err := wr.Write(batch.data); err != nil {
return err
}
}
return nil
}
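
A short usage sketch of the exported batch API above; printReplay and the key/value literals are illustrative only:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
)

// printReplay implements BatchReplay by printing each record.
type printReplay struct{}

func (printReplay) Put(key, value []byte) { fmt.Printf("put %s=%s\n", key, value) }
func (printReplay) Delete(key []byte)     { fmt.Printf("del %s\n", key) }

func main() {
	b := new(leveldb.Batch)
	b.Put([]byte("k1"), []byte("v1"))
	b.Put([]byte("k2"), []byte("v2"))
	b.Delete([]byte("k1")) // a delete is its own record, not a removal of the earlier put
	fmt.Println(b.Len())   // 3

	// Dump/Load round-trip; the dumped slice shares the batch buffer.
	var clone leveldb.Batch
	if err := clone.Load(b.Dump()); err != nil {
		panic(err)
	}
	if err := clone.Replay(printReplay{}); err != nil {
		panic(err)
	}
	// Applying the batch to an open DB would be: db.Write(b, nil)
}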

704
vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go generated vendored Normal file

@ -0,0 +1,704 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package cache provides interfaces and implementations of cache algorithms.
package cache
import (
"sync"
"sync/atomic"
"unsafe"
"github.com/syndtr/goleveldb/leveldb/util"
)
// Cacher provides an interface for implementing caching functionality.
// An implementation must be safe for concurrent use.
type Cacher interface {
	// Capacity returns cache capacity.
	Capacity() int
	// SetCapacity sets cache capacity.
	SetCapacity(capacity int)
	// Promote promotes the 'cache node'.
	Promote(n *Node)
	// Ban evicts the 'cache node' and prevents subsequent 'promote'.
	Ban(n *Node)
	// Evict evicts the 'cache node'.
	Evict(n *Node)
	// EvictNS evicts every 'cache node' with the given namespace.
	EvictNS(ns uint64)
	// EvictAll evicts all 'cache nodes'.
	EvictAll()
	// Close closes the 'cache tree'.
	Close() error
}
// Value is a 'cacheable object'. It may implement util.Releaser; if so,
// the Release method will be called once the object is released.
type Value interface{}
// NamespaceGetter provides convenient wrapper for namespace.
type NamespaceGetter struct {
Cache *Cache
NS uint64
}
// Get simply calls Cache.Get() method.
func (g *NamespaceGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
return g.Cache.Get(g.NS, key, setFunc)
}
// The hash tables implementation is based on:
// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu,
// Kunlong Zhang, and Michael Spear.
// ACM Symposium on Principles of Distributed Computing, Jul 2014.
const (
mInitialSize = 1 << 4
mOverflowThreshold = 1 << 5
mOverflowGrowThreshold = 1 << 7
)
type mBucket struct {
mu sync.Mutex
node []*Node
frozen bool
}
func (b *mBucket) freeze() []*Node {
b.mu.Lock()
defer b.mu.Unlock()
if !b.frozen {
b.frozen = true
}
return b.node
}
func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) {
b.mu.Lock()
if b.frozen {
b.mu.Unlock()
return
}
// Scan the node.
for _, n := range b.node {
if n.hash == hash && n.ns == ns && n.key == key {
atomic.AddInt32(&n.ref, 1)
b.mu.Unlock()
return true, false, n
}
}
// Get only.
if noset {
b.mu.Unlock()
return true, false, nil
}
// Create node.
n = &Node{
r: r,
hash: hash,
ns: ns,
key: key,
ref: 1,
}
// Add node to bucket.
b.node = append(b.node, n)
bLen := len(b.node)
b.mu.Unlock()
// Update counter.
grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold
if bLen > mOverflowThreshold {
grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold
}
// Grow.
if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
nhLen := len(h.buckets) << 1
nh := &mNode{
buckets: make([]unsafe.Pointer, nhLen),
mask: uint32(nhLen) - 1,
pred: unsafe.Pointer(h),
growThreshold: int32(nhLen * mOverflowThreshold),
shrinkThreshold: int32(nhLen >> 1),
}
ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
if !ok {
panic("BUG: failed swapping head")
}
go nh.initBuckets()
}
return true, true, n
}
func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) {
b.mu.Lock()
if b.frozen {
b.mu.Unlock()
return
}
// Scan the node.
var (
n *Node
bLen int
)
for i := range b.node {
n = b.node[i]
if n.ns == ns && n.key == key {
if atomic.LoadInt32(&n.ref) == 0 {
deleted = true
// Call releaser.
if n.value != nil {
if r, ok := n.value.(util.Releaser); ok {
r.Release()
}
n.value = nil
}
// Remove node from bucket.
b.node = append(b.node[:i], b.node[i+1:]...)
bLen = len(b.node)
}
break
}
}
b.mu.Unlock()
if deleted {
// Call OnDel.
for _, f := range n.onDel {
f()
}
// Update counter.
atomic.AddInt32(&r.size, int32(n.size)*-1)
shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold
if bLen >= mOverflowThreshold {
atomic.AddInt32(&h.overflow, -1)
}
// Shrink.
if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
nhLen := len(h.buckets) >> 1
nh := &mNode{
buckets: make([]unsafe.Pointer, nhLen),
mask: uint32(nhLen) - 1,
pred: unsafe.Pointer(h),
growThreshold: int32(nhLen * mOverflowThreshold),
shrinkThreshold: int32(nhLen >> 1),
}
ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
if !ok {
panic("BUG: failed swapping head")
}
go nh.initBuckets()
}
}
return true, deleted
}
type mNode struct {
buckets []unsafe.Pointer // []*mBucket
mask uint32
pred unsafe.Pointer // *mNode
resizeInProgess int32
overflow int32
growThreshold int32
shrinkThreshold int32
}
func (n *mNode) initBucket(i uint32) *mBucket {
if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil {
return b
}
p := (*mNode)(atomic.LoadPointer(&n.pred))
if p != nil {
var node []*Node
if n.mask > p.mask {
// Grow.
pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask]))
if pb == nil {
pb = p.initBucket(i & p.mask)
}
m := pb.freeze()
// Split nodes.
for _, x := range m {
if x.hash&n.mask == i {
node = append(node, x)
}
}
} else {
// Shrink.
pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i]))
if pb0 == nil {
pb0 = p.initBucket(i)
}
pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))]))
if pb1 == nil {
pb1 = p.initBucket(i + uint32(len(n.buckets)))
}
m0 := pb0.freeze()
m1 := pb1.freeze()
// Merge nodes.
node = make([]*Node, 0, len(m0)+len(m1))
node = append(node, m0...)
node = append(node, m1...)
}
b := &mBucket{node: node}
if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) {
if len(node) > mOverflowThreshold {
atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold))
}
return b
}
}
return (*mBucket)(atomic.LoadPointer(&n.buckets[i]))
}
func (n *mNode) initBuckets() {
for i := range n.buckets {
n.initBucket(uint32(i))
}
atomic.StorePointer(&n.pred, nil)
}
// Cache is a 'cache map'.
type Cache struct {
mu sync.RWMutex
mHead unsafe.Pointer // *mNode
nodes int32
size int32
cacher Cacher
closed bool
}
// NewCache creates a new 'cache map'. The cacher is optional and
// may be nil.
func NewCache(cacher Cacher) *Cache {
h := &mNode{
buckets: make([]unsafe.Pointer, mInitialSize),
mask: mInitialSize - 1,
growThreshold: int32(mInitialSize * mOverflowThreshold),
shrinkThreshold: 0,
}
for i := range h.buckets {
h.buckets[i] = unsafe.Pointer(&mBucket{})
}
r := &Cache{
mHead: unsafe.Pointer(h),
cacher: cacher,
}
return r
}
func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) {
h := (*mNode)(atomic.LoadPointer(&r.mHead))
i := hash & h.mask
b := (*mBucket)(atomic.LoadPointer(&h.buckets[i]))
if b == nil {
b = h.initBucket(i)
}
return h, b
}
func (r *Cache) delete(n *Node) bool {
for {
h, b := r.getBucket(n.hash)
done, deleted := b.delete(r, h, n.hash, n.ns, n.key)
if done {
return deleted
}
}
}
// Nodes returns the number of 'cache nodes' in the map.
func (r *Cache) Nodes() int {
	return int(atomic.LoadInt32(&r.nodes))
}
// Size returns the total size of all 'cache nodes' in the map.
func (r *Cache) Size() int {
	return int(atomic.LoadInt32(&r.size))
}
// Capacity returns cache capacity.
func (r *Cache) Capacity() int {
if r.cacher == nil {
return 0
}
return r.cacher.Capacity()
}
// SetCapacity sets cache capacity.
func (r *Cache) SetCapacity(capacity int) {
if r.cacher != nil {
r.cacher.SetCapacity(capacity)
}
}
// Get gets the 'cache node' with the given namespace and key.
// If the 'cache node' is not found and setFunc is not nil, Get will atomically
// create the 'cache node' by calling setFunc. Otherwise Get returns nil.
//
// The returned 'cache handle' should be released after use by calling Release
// method.
func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle {
r.mu.RLock()
defer r.mu.RUnlock()
if r.closed {
return nil
}
hash := murmur32(ns, key, 0xf00)
for {
h, b := r.getBucket(hash)
done, _, n := b.get(r, h, hash, ns, key, setFunc == nil)
if done {
if n != nil {
n.mu.Lock()
if n.value == nil {
if setFunc == nil {
n.mu.Unlock()
n.unref()
return nil
}
n.size, n.value = setFunc()
if n.value == nil {
n.size = 0
n.mu.Unlock()
n.unref()
return nil
}
atomic.AddInt32(&r.size, int32(n.size))
}
n.mu.Unlock()
if r.cacher != nil {
r.cacher.Promote(n)
}
return &Handle{unsafe.Pointer(n)}
}
break
}
}
return nil
}
// Delete removes and bans the 'cache node' with the given namespace and key.
// A banned 'cache node' will never be inserted into the 'cache tree'. The ban
// applies only to that particular 'cache node', so a recreated 'cache node'
// will not be banned.
//
// If onDel is not nil, it will be executed if no such 'cache node' exists,
// or once the 'cache node' is released.
//
// Delete returns true if such a 'cache node' exists.
func (r *Cache) Delete(ns, key uint64, onDel func()) bool {
r.mu.RLock()
defer r.mu.RUnlock()
if r.closed {
return false
}
hash := murmur32(ns, key, 0xf00)
for {
h, b := r.getBucket(hash)
done, _, n := b.get(r, h, hash, ns, key, true)
if done {
if n != nil {
if onDel != nil {
n.mu.Lock()
n.onDel = append(n.onDel, onDel)
n.mu.Unlock()
}
if r.cacher != nil {
r.cacher.Ban(n)
}
n.unref()
return true
}
break
}
}
if onDel != nil {
onDel()
}
return false
}
// Evict evicts the 'cache node' with the given namespace and key. This
// simply calls Cacher.Evict.
//
// Evict returns true if such a 'cache node' exists.
func (r *Cache) Evict(ns, key uint64) bool {
r.mu.RLock()
defer r.mu.RUnlock()
if r.closed {
return false
}
hash := murmur32(ns, key, 0xf00)
for {
h, b := r.getBucket(hash)
done, _, n := b.get(r, h, hash, ns, key, true)
if done {
if n != nil {
if r.cacher != nil {
r.cacher.Evict(n)
}
n.unref()
return true
}
break
}
}
return false
}
// EvictNS evicts every 'cache node' with the given namespace. This simply
// calls Cacher.EvictNS.
func (r *Cache) EvictNS(ns uint64) {
r.mu.RLock()
defer r.mu.RUnlock()
if r.closed {
return
}
if r.cacher != nil {
r.cacher.EvictNS(ns)
}
}
// EvictAll evicts all 'cache nodes'. This simply calls Cacher.EvictAll.
func (r *Cache) EvictAll() {
r.mu.RLock()
defer r.mu.RUnlock()
if r.closed {
return
}
if r.cacher != nil {
r.cacher.EvictAll()
}
}
// Close closes the 'cache map' and forcefully releases all 'cache nodes'.
func (r *Cache) Close() error {
r.mu.Lock()
if !r.closed {
r.closed = true
h := (*mNode)(r.mHead)
h.initBuckets()
for i := range h.buckets {
b := (*mBucket)(h.buckets[i])
for _, n := range b.node {
// Call releaser.
if n.value != nil {
if r, ok := n.value.(util.Releaser); ok {
r.Release()
}
n.value = nil
}
// Call OnDel.
for _, f := range n.onDel {
f()
}
n.onDel = nil
}
}
}
r.mu.Unlock()
// Avoid deadlock.
if r.cacher != nil {
if err := r.cacher.Close(); err != nil {
return err
}
}
return nil
}
// CloseWeak closes the 'cache map' and evicts all 'cache nodes' from the
// cacher, but unlike Close it doesn't forcefully release the 'cache nodes'.
func (r *Cache) CloseWeak() error {
r.mu.Lock()
if !r.closed {
r.closed = true
}
r.mu.Unlock()
// Avoid deadlock.
if r.cacher != nil {
r.cacher.EvictAll()
if err := r.cacher.Close(); err != nil {
return err
}
}
return nil
}
// Node is a 'cache node'.
type Node struct {
r *Cache
hash uint32
ns, key uint64
mu sync.Mutex
size int
value Value
ref int32
onDel []func()
CacheData unsafe.Pointer
}
// NS returns this 'cache node' namespace.
func (n *Node) NS() uint64 {
return n.ns
}
// Key returns this 'cache node' key.
func (n *Node) Key() uint64 {
return n.key
}
// Size returns this 'cache node' size.
func (n *Node) Size() int {
return n.size
}
// Value returns this 'cache node' value.
func (n *Node) Value() Value {
return n.value
}
// Ref returns this 'cache node' ref counter.
func (n *Node) Ref() int32 {
return atomic.LoadInt32(&n.ref)
}
// GetHandle returns a handle for this 'cache node'.
func (n *Node) GetHandle() *Handle {
if atomic.AddInt32(&n.ref, 1) <= 1 {
panic("BUG: Node.GetHandle on zero ref")
}
return &Handle{unsafe.Pointer(n)}
}
func (n *Node) unref() {
if atomic.AddInt32(&n.ref, -1) == 0 {
n.r.delete(n)
}
}
func (n *Node) unrefLocked() {
if atomic.AddInt32(&n.ref, -1) == 0 {
n.r.mu.RLock()
if !n.r.closed {
n.r.delete(n)
}
n.r.mu.RUnlock()
}
}
// Handle is a 'cache handle' of a 'cache node'.
type Handle struct {
n unsafe.Pointer // *Node
}
// Value returns the value of the 'cache node'.
func (h *Handle) Value() Value {
n := (*Node)(atomic.LoadPointer(&h.n))
if n != nil {
return n.value
}
return nil
}
// Release releases this 'cache handle'.
// It is safe to call release multiple times.
func (h *Handle) Release() {
nPtr := atomic.LoadPointer(&h.n)
if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) {
n := (*Node)(nPtr)
n.unrefLocked()
}
}
func murmur32(ns, key uint64, seed uint32) uint32 {
const (
m = uint32(0x5bd1e995)
r = 24
)
k1 := uint32(ns >> 32)
k2 := uint32(ns)
k3 := uint32(key >> 32)
k4 := uint32(key)
k1 *= m
k1 ^= k1 >> r
k1 *= m
k2 *= m
k2 ^= k2 >> r
k2 *= m
k3 *= m
k3 ^= k3 >> r
k3 *= m
k4 *= m
k4 ^= k4 >> r
k4 *= m
h := seed
h *= m
h ^= k1
h *= m
h ^= k2
h *= m
h ^= k3
h *= m
h ^= k4
h ^= h >> 13
h *= m
h ^= h >> 15
return h
}
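
A minimal sketch of the Get/Release contract above; passing a nil Cacher to NewCache is allowed and simply disables eviction, and the namespace/key/value literals are illustrative:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	c := cache.NewCache(nil) // nil Cacher: no eviction policy, entries live until deleted
	defer c.Close()

	// setFunc runs only on a miss; concurrent callers share the resulting node.
	h := c.Get(1, 42, func() (size int, value cache.Value) {
		return 1, "expensive result"
	})
	fmt.Println(h.Value()) // "expensive result"
	h.Release()            // every handle must be released after use
}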

195
vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go generated vendored Normal file

@ -0,0 +1,195 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package cache
import (
"sync"
"unsafe"
)
type lruNode struct {
n *Node
h *Handle
ban bool
next, prev *lruNode
}
func (n *lruNode) insert(at *lruNode) {
x := at.next
at.next = n
n.prev = at
n.next = x
x.prev = n
}
func (n *lruNode) remove() {
if n.prev != nil {
n.prev.next = n.next
n.next.prev = n.prev
n.prev = nil
n.next = nil
} else {
panic("BUG: removing removed node")
}
}
type lru struct {
mu sync.Mutex
capacity int
used int
recent lruNode
}
func (r *lru) reset() {
r.recent.next = &r.recent
r.recent.prev = &r.recent
r.used = 0
}
func (r *lru) Capacity() int {
r.mu.Lock()
defer r.mu.Unlock()
return r.capacity
}
func (r *lru) SetCapacity(capacity int) {
var evicted []*lruNode
r.mu.Lock()
r.capacity = capacity
for r.used > r.capacity {
rn := r.recent.prev
if rn == nil {
panic("BUG: invalid LRU used or capacity counter")
}
rn.remove()
rn.n.CacheData = nil
r.used -= rn.n.Size()
evicted = append(evicted, rn)
}
r.mu.Unlock()
for _, rn := range evicted {
rn.h.Release()
}
}
func (r *lru) Promote(n *Node) {
var evicted []*lruNode
r.mu.Lock()
if n.CacheData == nil {
if n.Size() <= r.capacity {
rn := &lruNode{n: n, h: n.GetHandle()}
rn.insert(&r.recent)
n.CacheData = unsafe.Pointer(rn)
r.used += n.Size()
for r.used > r.capacity {
rn := r.recent.prev
if rn == nil {
panic("BUG: invalid LRU used or capacity counter")
}
rn.remove()
rn.n.CacheData = nil
r.used -= rn.n.Size()
evicted = append(evicted, rn)
}
}
} else {
rn := (*lruNode)(n.CacheData)
if !rn.ban {
rn.remove()
rn.insert(&r.recent)
}
}
r.mu.Unlock()
for _, rn := range evicted {
rn.h.Release()
}
}
func (r *lru) Ban(n *Node) {
r.mu.Lock()
if n.CacheData == nil {
n.CacheData = unsafe.Pointer(&lruNode{n: n, ban: true})
} else {
rn := (*lruNode)(n.CacheData)
if !rn.ban {
rn.remove()
rn.ban = true
r.used -= rn.n.Size()
r.mu.Unlock()
rn.h.Release()
rn.h = nil
return
}
}
r.mu.Unlock()
}
func (r *lru) Evict(n *Node) {
r.mu.Lock()
rn := (*lruNode)(n.CacheData)
if rn == nil || rn.ban {
r.mu.Unlock()
return
}
n.CacheData = nil
r.mu.Unlock()
rn.h.Release()
}
func (r *lru) EvictNS(ns uint64) {
var evicted []*lruNode
r.mu.Lock()
for e := r.recent.prev; e != &r.recent; {
rn := e
e = e.prev
if rn.n.NS() == ns {
rn.remove()
rn.n.CacheData = nil
r.used -= rn.n.Size()
evicted = append(evicted, rn)
}
}
r.mu.Unlock()
for _, rn := range evicted {
rn.h.Release()
}
}
func (r *lru) EvictAll() {
r.mu.Lock()
back := r.recent.prev
for rn := back; rn != &r.recent; rn = rn.prev {
rn.n.CacheData = nil
}
r.reset()
r.mu.Unlock()
for rn := back; rn != &r.recent; rn = rn.prev {
rn.h.Release()
}
}
func (r *lru) Close() error {
return nil
}
// NewLRU creates a new LRU cache.
func NewLRU(capacity int) Cacher {
r := &lru{capacity: capacity}
r.reset()
return r
}
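
Capacity here is counted in the size units reported by setFunc, not in node count. A sketch (with assumed toy sizes of 1) combining the cache map with this LRU policy:

package main

import (
	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	c := cache.NewCache(cache.NewLRU(2)) // room for two size-1 entries
	defer c.Close()

	for key := uint64(0); key < 3; key++ {
		h := c.Get(0, key, func() (int, cache.Value) { return 1, key })
		h.Release() // the LRU holds its own handle until the node is evicted
	}
	// With capacity 2, inserting key 2 pushes used above capacity, so the
	// least recently used entry (key 0) is evicted; keys 1 and 2 remain.
}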

67
vendor/github.com/syndtr/goleveldb/leveldb/comparer.go generated vendored Normal file

@ -0,0 +1,67 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"github.com/syndtr/goleveldb/leveldb/comparer"
)
type iComparer struct {
ucmp comparer.Comparer
}
func (icmp *iComparer) uName() string {
return icmp.ucmp.Name()
}
func (icmp *iComparer) uCompare(a, b []byte) int {
return icmp.ucmp.Compare(a, b)
}
func (icmp *iComparer) uSeparator(dst, a, b []byte) []byte {
return icmp.ucmp.Separator(dst, a, b)
}
func (icmp *iComparer) uSuccessor(dst, b []byte) []byte {
return icmp.ucmp.Successor(dst, b)
}
func (icmp *iComparer) Name() string {
return icmp.uName()
}
func (icmp *iComparer) Compare(a, b []byte) int {
x := icmp.uCompare(internalKey(a).ukey(), internalKey(b).ukey())
if x == 0 {
if m, n := internalKey(a).num(), internalKey(b).num(); m > n {
return -1
} else if m < n {
return 1
}
}
return x
}
func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
ua, ub := internalKey(a).ukey(), internalKey(b).ukey()
dst = icmp.uSeparator(dst, ua, ub)
if dst != nil && len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
// Append earliest possible number.
return append(dst, keyMaxNumBytes...)
}
return nil
}
func (icmp *iComparer) Successor(dst, b []byte) []byte {
ub := internalKey(b).ukey()
dst = icmp.uSuccessor(dst, ub)
if dst != nil && len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
// Append earliest possible number.
return append(dst, keyMaxNumBytes...)
}
return nil
}

51
vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go generated vendored Normal file

@ -0,0 +1,51 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package comparer
import "bytes"
type bytesComparer struct{}
func (bytesComparer) Compare(a, b []byte) int {
return bytes.Compare(a, b)
}
func (bytesComparer) Name() string {
return "leveldb.BytewiseComparator"
}
func (bytesComparer) Separator(dst, a, b []byte) []byte {
i, n := 0, len(a)
if n > len(b) {
n = len(b)
}
for ; i < n && a[i] == b[i]; i++ {
}
if i >= n {
// Do not shorten if one string is a prefix of the other
} else if c := a[i]; c < 0xff && c+1 < b[i] {
dst = append(dst, a[:i+1]...)
dst[len(dst)-1]++
return dst
}
return nil
}
func (bytesComparer) Successor(dst, b []byte) []byte {
for i, c := range b {
if c != 0xff {
dst = append(dst, b[:i+1]...)
dst[len(dst)-1]++
return dst
}
}
return nil
}
// DefaultComparer is the default implementation of the Comparer interface.
// It uses the natural byte ordering, consistent with bytes.Compare.
var DefaultComparer = bytesComparer{}

57
vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go generated vendored Normal file

@ -0,0 +1,57 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package comparer provides interface and implementation for ordering
// sets of data.
package comparer
// BasicComparer is the interface that wraps the basic Compare method.
type BasicComparer interface {
// Compare returns -1, 0, or +1 depending on whether a is 'less than',
// 'equal to' or 'greater than' b. The two arguments can only be 'equal'
// if their contents are exactly equal. Furthermore, the empty slice
// must be 'less than' any non-empty slice.
Compare(a, b []byte) int
}
// Comparer defines a total ordering over the space of []byte keys: a 'less
// than' relationship.
type Comparer interface {
BasicComparer
	// Name returns the name of the comparer.
	//
	// The LevelDB on-disk format stores the comparer name, and opening a
	// database with a different comparer from the one it was created with
	// will result in an error.
	//
	// An implementation should switch to a new name whenever the comparer
	// implementation changes in a way that would cause the relative
	// ordering of any two keys to change.
	//
	// Names starting with "leveldb." are reserved and should not be used
	// by any users of this package.
	Name() string
	// Below are advanced functions used to reduce the space requirements
	// for internal data structures such as index blocks.
	// Separator appends a sequence of bytes x to dst such that a <= x && x < b,
	// where 'less than' is consistent with Compare. An implementation should
	// return nil if x is equal to a.
	//
	// The contents of a and b must not be modified in any way. Doing so
	// may corrupt the comparer's internal state.
	Separator(dst, a, b []byte) []byte
	// Successor appends a sequence of bytes x to dst such that x >= b, where
	// 'less than' is consistent with Compare. An implementation should return
	// nil if x is equal to b.
	//
	// The contents of b must not be modified in any way. Doing so may
	// corrupt the comparer's internal state.
Successor(dst, b []byte) []byte
}
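
The bytewise implementation in the previous file makes these contracts concrete; a small sketch with the expected outputs noted in comments:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/comparer"
)

func main() {
	cmp := comparer.DefaultComparer
	fmt.Println(cmp.Compare([]byte("abc"), []byte("abd"))) // -1

	// Separator yields a short key x with "abcde" <= x < "abxyz".
	fmt.Printf("%q\n", cmp.Separator(nil, []byte("abcde"), []byte("abxyz"))) // "abd"

	// Successor yields a short key x with x >= "abc".
	fmt.Printf("%q\n", cmp.Successor(nil, []byte("abc"))) // "b"
}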

1179
vendor/github.com/syndtr/goleveldb/leveldb/db.go generated vendored Normal file

File diff suppressed because it is too large

854
vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go generated vendored Normal file

@ -0,0 +1,854 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"sync"
"time"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
)
var (
errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting")
)
type cStat struct {
duration time.Duration
read int64
write int64
}
func (p *cStat) add(n *cStatStaging) {
p.duration += n.duration
p.read += n.read
p.write += n.write
}
func (p *cStat) get() (duration time.Duration, read, write int64) {
return p.duration, p.read, p.write
}
type cStatStaging struct {
start time.Time
duration time.Duration
on bool
read int64
write int64
}
func (p *cStatStaging) startTimer() {
if !p.on {
p.start = time.Now()
p.on = true
}
}
func (p *cStatStaging) stopTimer() {
if p.on {
p.duration += time.Since(p.start)
p.on = false
}
}
type cStats struct {
lk sync.Mutex
stats []cStat
}
func (p *cStats) addStat(level int, n *cStatStaging) {
p.lk.Lock()
if level >= len(p.stats) {
newStats := make([]cStat, level+1)
copy(newStats, p.stats)
p.stats = newStats
}
p.stats[level].add(n)
p.lk.Unlock()
}
func (p *cStats) getStat(level int) (duration time.Duration, read, write int64) {
p.lk.Lock()
defer p.lk.Unlock()
if level < len(p.stats) {
return p.stats[level].get()
}
return
}
func (db *DB) compactionError() {
var err error
noerr:
// No error.
for {
select {
case err = <-db.compErrSetC:
switch {
case err == nil:
case err == ErrReadOnly, errors.IsCorrupted(err):
goto hasperr
default:
goto haserr
}
case <-db.closeC:
return
}
}
haserr:
// Transient error.
for {
select {
case db.compErrC <- err:
case err = <-db.compErrSetC:
switch {
case err == nil:
goto noerr
case err == ErrReadOnly, errors.IsCorrupted(err):
goto hasperr
default:
}
case <-db.closeC:
return
}
}
hasperr:
// Persistent error.
for {
select {
case db.compErrC <- err:
case db.compPerErrC <- err:
case db.writeLockC <- struct{}{}:
// Hold write lock, so that write won't pass-through.
db.compWriteLocking = true
case <-db.closeC:
if db.compWriteLocking {
// We should release the lock or Close will hang.
<-db.writeLockC
}
return
}
}
}
type compactionTransactCounter int
func (cnt *compactionTransactCounter) incr() {
*cnt++
}
type compactionTransactInterface interface {
run(cnt *compactionTransactCounter) error
revert() error
}
func (db *DB) compactionTransact(name string, t compactionTransactInterface) {
defer func() {
if x := recover(); x != nil {
if x == errCompactionTransactExiting {
if err := t.revert(); err != nil {
db.logf("%s revert error %q", name, err)
}
}
panic(x)
}
}()
const (
backoffMin = 1 * time.Second
backoffMax = 8 * time.Second
backoffMul = 2 * time.Second
)
var (
backoff = backoffMin
backoffT = time.NewTimer(backoff)
lastCnt = compactionTransactCounter(0)
disableBackoff = db.s.o.GetDisableCompactionBackoff()
)
for n := 0; ; n++ {
// Check whether the DB is closed.
if db.isClosed() {
db.logf("%s exiting", name)
db.compactionExitTransact()
} else if n > 0 {
db.logf("%s retrying N·%d", name, n)
}
// Execute.
cnt := compactionTransactCounter(0)
err := t.run(&cnt)
if err != nil {
db.logf("%s error I·%d %q", name, cnt, err)
}
// Set compaction error status.
select {
case db.compErrSetC <- err:
case perr := <-db.compPerErrC:
if err != nil {
db.logf("%s exiting (persistent error %q)", name, perr)
db.compactionExitTransact()
}
case <-db.closeC:
db.logf("%s exiting", name)
db.compactionExitTransact()
}
if err == nil {
return
}
if errors.IsCorrupted(err) {
db.logf("%s exiting (corruption detected)", name)
db.compactionExitTransact()
}
if !disableBackoff {
// Reset backoff duration if counter is advancing.
if cnt > lastCnt {
backoff = backoffMin
lastCnt = cnt
}
// Backoff.
backoffT.Reset(backoff)
if backoff < backoffMax {
backoff *= backoffMul
if backoff > backoffMax {
backoff = backoffMax
}
}
select {
case <-backoffT.C:
case <-db.closeC:
db.logf("%s exiting", name)
db.compactionExitTransact()
}
}
}
}
type compactionTransactFunc struct {
runFunc func(cnt *compactionTransactCounter) error
revertFunc func() error
}
func (t *compactionTransactFunc) run(cnt *compactionTransactCounter) error {
return t.runFunc(cnt)
}
func (t *compactionTransactFunc) revert() error {
if t.revertFunc != nil {
return t.revertFunc()
}
return nil
}
func (db *DB) compactionTransactFunc(name string, run func(cnt *compactionTransactCounter) error, revert func() error) {
db.compactionTransact(name, &compactionTransactFunc{run, revert})
}
func (db *DB) compactionExitTransact() {
panic(errCompactionTransactExiting)
}
func (db *DB) compactionCommit(name string, rec *sessionRecord) {
db.compCommitLk.Lock()
defer db.compCommitLk.Unlock() // Defer is necessary.
db.compactionTransactFunc(name+"@commit", func(cnt *compactionTransactCounter) error {
return db.s.commit(rec)
}, nil)
}
func (db *DB) memCompaction() {
mdb := db.getFrozenMem()
if mdb == nil {
return
}
defer mdb.decref()
db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size()))
// Don't compact empty memdb.
if mdb.Len() == 0 {
db.logf("memdb@flush skipping")
// drop frozen memdb
db.dropFrozenMem()
return
}
// Pause table compaction.
resumeC := make(chan struct{})
select {
case db.tcompPauseC <- (chan<- struct{})(resumeC):
case <-db.compPerErrC:
close(resumeC)
resumeC = nil
case <-db.closeC:
db.compactionExitTransact()
}
var (
rec = &sessionRecord{}
stats = &cStatStaging{}
flushLevel int
)
// Generate tables.
db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) {
stats.startTimer()
flushLevel, err = db.s.flushMemdb(rec, mdb.DB, db.memdbMaxLevel)
stats.stopTimer()
return
}, func() error {
for _, r := range rec.addedTables {
db.logf("memdb@flush revert @%d", r.num)
if err := db.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: r.num}); err != nil {
return err
}
}
return nil
})
rec.setJournalNum(db.journalFd.Num)
rec.setSeqNum(db.frozenSeq)
// Commit.
stats.startTimer()
db.compactionCommit("memdb", rec)
stats.stopTimer()
db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration)
for _, r := range rec.addedTables {
stats.write += r.size
}
db.compStats.addStat(flushLevel, stats)
// Drop frozen memdb.
db.dropFrozenMem()
// Resume table compaction.
if resumeC != nil {
select {
case <-resumeC:
close(resumeC)
case <-db.closeC:
db.compactionExitTransact()
}
}
// Trigger table compaction.
db.compTrigger(db.tcompCmdC)
}
type tableCompactionBuilder struct {
db *DB
s *session
c *compaction
rec *sessionRecord
stat0, stat1 *cStatStaging
snapHasLastUkey bool
snapLastUkey []byte
snapLastSeq uint64
snapIter int
snapKerrCnt int
snapDropCnt int
kerrCnt int
dropCnt int
minSeq uint64
strict bool
tableSize int
tw *tWriter
}
func (b *tableCompactionBuilder) appendKV(key, value []byte) error {
// Create new table if not already.
if b.tw == nil {
// Check for pause event.
if b.db != nil {
select {
case ch := <-b.db.tcompPauseC:
b.db.pauseCompaction(ch)
case <-b.db.closeC:
b.db.compactionExitTransact()
default:
}
}
// Create new table.
var err error
b.tw, err = b.s.tops.create()
if err != nil {
return err
}
}
// Write key/value into table.
return b.tw.append(key, value)
}
func (b *tableCompactionBuilder) needFlush() bool {
return b.tw.tw.BytesLen() >= b.tableSize
}
func (b *tableCompactionBuilder) flush() error {
t, err := b.tw.finish()
if err != nil {
return err
}
b.rec.addTableFile(b.c.sourceLevel+1, t)
b.stat1.write += t.size
b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.sourceLevel+1, t.fd.Num, b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax)
b.tw = nil
return nil
}
func (b *tableCompactionBuilder) cleanup() {
if b.tw != nil {
b.tw.drop()
b.tw = nil
}
}
func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
snapResumed := b.snapIter > 0
	hasLastUkey := b.snapHasLastUkey // The key might have zero length, so this is necessary.
lastUkey := append([]byte{}, b.snapLastUkey...)
lastSeq := b.snapLastSeq
b.kerrCnt = b.snapKerrCnt
b.dropCnt = b.snapDropCnt
// Restore compaction state.
b.c.restore()
defer b.cleanup()
b.stat1.startTimer()
defer b.stat1.stopTimer()
iter := b.c.newIterator()
defer iter.Release()
for i := 0; iter.Next(); i++ {
// Incr transact counter.
cnt.incr()
// Skip until last state.
if i < b.snapIter {
continue
}
resumed := false
if snapResumed {
resumed = true
snapResumed = false
}
ikey := iter.Key()
ukey, seq, kt, kerr := parseInternalKey(ikey)
if kerr == nil {
shouldStop := !resumed && b.c.shouldStopBefore(ikey)
if !hasLastUkey || b.s.icmp.uCompare(lastUkey, ukey) != 0 {
// First occurrence of this user key.
// Only rotate tables if ukey doesn't hop across.
if b.tw != nil && (shouldStop || b.needFlush()) {
if err := b.flush(); err != nil {
return err
}
// Creates snapshot of the state.
b.c.save()
b.snapHasLastUkey = hasLastUkey
b.snapLastUkey = append(b.snapLastUkey[:0], lastUkey...)
b.snapLastSeq = lastSeq
b.snapIter = i
b.snapKerrCnt = b.kerrCnt
b.snapDropCnt = b.dropCnt
}
hasLastUkey = true
lastUkey = append(lastUkey[:0], ukey...)
lastSeq = keyMaxSeq
}
switch {
case lastSeq <= b.minSeq:
			// Dropped because a newer entry for the same user key exists
fallthrough // (A)
case kt == keyTypeDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
// For this user key:
// (1) there is no data in higher levels
// (2) data in lower levels will have larger seq numbers
// (3) data in layers that are being compacted here and have
// smaller seq numbers will be dropped in the next
// few iterations of this loop (by rule (A) above).
// Therefore this deletion marker is obsolete and can be dropped.
lastSeq = seq
b.dropCnt++
continue
default:
lastSeq = seq
}
} else {
if b.strict {
return kerr
}
// Don't drop corrupted keys.
hasLastUkey = false
lastUkey = lastUkey[:0]
lastSeq = keyMaxSeq
b.kerrCnt++
}
if err := b.appendKV(ikey, iter.Value()); err != nil {
return err
}
}
if err := iter.Error(); err != nil {
return err
}
// Finish last table.
if b.tw != nil && !b.tw.empty() {
return b.flush()
}
return nil
}
func (b *tableCompactionBuilder) revert() error {
for _, at := range b.rec.addedTables {
b.s.logf("table@build revert @%d", at.num)
if err := b.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: at.num}); err != nil {
return err
}
}
return nil
}
func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
defer c.release()
rec := &sessionRecord{}
rec.addCompPtr(c.sourceLevel, c.imax)
if !noTrivial && c.trivial() {
t := c.levels[0][0]
db.logf("table@move L%d@%d -> L%d", c.sourceLevel, t.fd.Num, c.sourceLevel+1)
rec.delTable(c.sourceLevel, t.fd.Num)
rec.addTableFile(c.sourceLevel+1, t)
db.compactionCommit("table-move", rec)
return
}
var stats [2]cStatStaging
for i, tables := range c.levels {
for _, t := range tables {
stats[i].read += t.size
// Insert deleted tables into record
rec.delTable(c.sourceLevel+i, t.fd.Num)
}
}
sourceSize := int(stats[0].read + stats[1].read)
minSeq := db.minSeq()
db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.sourceLevel, len(c.levels[0]), c.sourceLevel+1, len(c.levels[1]), shortenb(sourceSize), minSeq)
b := &tableCompactionBuilder{
db: db,
s: db.s,
c: c,
rec: rec,
stat1: &stats[1],
minSeq: minSeq,
strict: db.s.o.GetStrict(opt.StrictCompaction),
tableSize: db.s.o.GetCompactionTableSize(c.sourceLevel + 1),
}
db.compactionTransact("table@build", b)
// Commit.
stats[1].startTimer()
db.compactionCommit("table", rec)
stats[1].stopTimer()
resultSize := int(stats[1].write)
db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration)
// Save compaction stats
for i := range stats {
db.compStats.addStat(c.sourceLevel+1, &stats[i])
}
}
func (db *DB) tableRangeCompaction(level int, umin, umax []byte) error {
db.logf("table@compaction range L%d %q:%q", level, umin, umax)
if level >= 0 {
if c := db.s.getCompactionRange(level, umin, umax, true); c != nil {
db.tableCompaction(c, true)
}
} else {
// Retry until nothing to compact.
for {
compacted := false
// Scan for maximum level with overlapped tables.
v := db.s.version()
m := 1
for i := m; i < len(v.levels); i++ {
tables := v.levels[i]
if tables.overlaps(db.s.icmp, umin, umax, false) {
m = i
}
}
v.release()
for level := 0; level < m; level++ {
if c := db.s.getCompactionRange(level, umin, umax, false); c != nil {
db.tableCompaction(c, true)
compacted = true
}
}
if !compacted {
break
}
}
}
return nil
}
func (db *DB) tableAutoCompaction() {
if c := db.s.pickCompaction(); c != nil {
db.tableCompaction(c, false)
}
}
func (db *DB) tableNeedCompaction() bool {
v := db.s.version()
defer v.release()
return v.needCompaction()
}
// resumeWrite reports whether write operations should be resumed, which is the case once enough level-0 files have been compacted.
func (db *DB) resumeWrite() bool {
v := db.s.version()
defer v.release()
if v.tLen(0) < db.s.o.GetWriteL0PauseTrigger() {
return true
}
return false
}
func (db *DB) pauseCompaction(ch chan<- struct{}) {
select {
case ch <- struct{}{}:
case <-db.closeC:
db.compactionExitTransact()
}
}
type cCmd interface {
ack(err error)
}
type cAuto struct {
	// Note: for table compaction, a non-empty ackC marks a command that waits for the compaction to finish.
ackC chan<- error
}
func (r cAuto) ack(err error) {
if r.ackC != nil {
defer func() {
recover()
}()
r.ackC <- err
}
}
type cRange struct {
level int
min, max []byte
ackC chan<- error
}
func (r cRange) ack(err error) {
if r.ackC != nil {
defer func() {
recover()
}()
r.ackC <- err
}
}
// This will trigger auto compaction but will not wait for it.
func (db *DB) compTrigger(compC chan<- cCmd) {
select {
case compC <- cAuto{}:
default:
}
}
// This will trigger auto compaction and/or wait for all compaction to be done.
func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) {
ch := make(chan error)
defer close(ch)
// Send cmd.
select {
case compC <- cAuto{ch}:
case err = <-db.compErrC:
return
case <-db.closeC:
return ErrClosed
}
// Wait cmd.
select {
case err = <-ch:
case err = <-db.compErrC:
case <-db.closeC:
return ErrClosed
}
return err
}
// Send range compaction request.
func (db *DB) compTriggerRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
ch := make(chan error)
defer close(ch)
// Send cmd.
select {
case compC <- cRange{level, min, max, ch}:
case err := <-db.compErrC:
return err
case <-db.closeC:
return ErrClosed
}
// Wait cmd.
select {
case err = <-ch:
case err = <-db.compErrC:
case <-db.closeC:
return ErrClosed
}
return err
}
func (db *DB) mCompaction() {
var x cCmd
defer func() {
if x := recover(); x != nil {
if x != errCompactionTransactExiting {
panic(x)
}
}
if x != nil {
x.ack(ErrClosed)
}
db.closeW.Done()
}()
for {
select {
case x = <-db.mcompCmdC:
switch x.(type) {
case cAuto:
db.memCompaction()
x.ack(nil)
x = nil
default:
panic("leveldb: unknown command")
}
case <-db.closeC:
return
}
}
}
func (db *DB) tCompaction() {
var (
x cCmd
waitQ []cCmd
)
defer func() {
if x := recover(); x != nil {
if x != errCompactionTransactExiting {
panic(x)
}
}
for i := range waitQ {
waitQ[i].ack(ErrClosed)
waitQ[i] = nil
}
if x != nil {
x.ack(ErrClosed)
}
db.closeW.Done()
}()
for {
if db.tableNeedCompaction() {
select {
case x = <-db.tcompCmdC:
case ch := <-db.tcompPauseC:
db.pauseCompaction(ch)
continue
case <-db.closeC:
return
default:
}
// Resume write operation as soon as possible.
if len(waitQ) > 0 && db.resumeWrite() {
for i := range waitQ {
waitQ[i].ack(nil)
waitQ[i] = nil
}
waitQ = waitQ[:0]
}
} else {
for i := range waitQ {
waitQ[i].ack(nil)
waitQ[i] = nil
}
waitQ = waitQ[:0]
select {
case x = <-db.tcompCmdC:
case ch := <-db.tcompPauseC:
db.pauseCompaction(ch)
continue
case <-db.closeC:
return
}
}
if x != nil {
switch cmd := x.(type) {
case cAuto:
if cmd.ackC != nil {
// Check the write pause state before caching it.
if db.resumeWrite() {
x.ack(nil)
} else {
waitQ = append(waitQ, x)
}
}
case cRange:
x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max))
default:
panic("leveldb: unknown command")
}
x = nil
}
db.tableAutoCompaction()
}
}
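
Externally this machinery is reached through DB.CompactRange, which (in this vendored version) funnels into compTriggerRange and tableRangeCompaction above; a sketch assuming an already-open *leveldb.DB:

package demo

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func compactAll(db *leveldb.DB) {
	// A zero-value Range covers the whole key space; internally this
	// reaches tableRangeCompaction with level -1, which scans every
	// level for overlapping tables until nothing is left to compact.
	if err := db.CompactRange(util.Range{}); err != nil {
		log.Fatal(err)
	}
}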

360
vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go generated vendored Normal file

@ -0,0 +1,360 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"errors"
"math/rand"
"runtime"
"sync"
"sync/atomic"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
)
var (
errInvalidInternalKey = errors.New("leveldb: Iterator: invalid internal key")
)
type memdbReleaser struct {
once sync.Once
m *memDB
}
func (mr *memdbReleaser) Release() {
mr.once.Do(func() {
mr.m.decref()
})
}
func (db *DB) newRawIterator(auxm *memDB, auxt tFiles, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader)
em, fm := db.getMems()
v := db.s.version()
tableIts := v.getIterators(slice, ro)
n := len(tableIts) + len(auxt) + 3
its := make([]iterator.Iterator, 0, n)
if auxm != nil {
ami := auxm.NewIterator(slice)
ami.SetReleaser(&memdbReleaser{m: auxm})
its = append(its, ami)
}
for _, t := range auxt {
its = append(its, v.s.tops.newIterator(t, slice, ro))
}
emi := em.NewIterator(slice)
emi.SetReleaser(&memdbReleaser{m: em})
its = append(its, emi)
if fm != nil {
fmi := fm.NewIterator(slice)
fmi.SetReleaser(&memdbReleaser{m: fm})
its = append(its, fmi)
}
its = append(its, tableIts...)
mi := iterator.NewMergedIterator(its, db.s.icmp, strict)
mi.SetReleaser(&versionReleaser{v: v})
return mi
}
func (db *DB) newIterator(auxm *memDB, auxt tFiles, seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter {
var islice *util.Range
if slice != nil {
islice = &util.Range{}
if slice.Start != nil {
islice.Start = makeInternalKey(nil, slice.Start, keyMaxSeq, keyTypeSeek)
}
if slice.Limit != nil {
islice.Limit = makeInternalKey(nil, slice.Limit, keyMaxSeq, keyTypeSeek)
}
}
rawIter := db.newRawIterator(auxm, auxt, islice, ro)
iter := &dbIter{
db: db,
icmp: db.s.icmp,
iter: rawIter,
seq: seq,
strict: opt.GetStrict(db.s.o.Options, ro, opt.StrictReader),
key: make([]byte, 0),
value: make([]byte, 0),
}
atomic.AddInt32(&db.aliveIters, 1)
runtime.SetFinalizer(iter, (*dbIter).Release)
return iter
}
func (db *DB) iterSamplingRate() int {
return rand.Intn(2 * db.s.o.GetIteratorSamplingRate())
}
type dir int
const (
dirReleased dir = iota - 1
dirSOI
dirEOI
dirBackward
dirForward
)
// dbIter represents an iterator's state over a database session.
type dbIter struct {
db *DB
icmp *iComparer
iter iterator.Iterator
seq uint64
strict bool
smaplingGap int
dir dir
key []byte
value []byte
err error
releaser util.Releaser
}
func (i *dbIter) sampleSeek() {
ikey := i.iter.Key()
i.smaplingGap -= len(ikey) + len(i.iter.Value())
for i.smaplingGap < 0 {
i.smaplingGap += i.db.iterSamplingRate()
i.db.sampleSeek(ikey)
}
}
func (i *dbIter) setErr(err error) {
i.err = err
i.key = nil
i.value = nil
}
func (i *dbIter) iterErr() {
if err := i.iter.Error(); err != nil {
i.setErr(err)
}
}
func (i *dbIter) Valid() bool {
return i.err == nil && i.dir > dirEOI
}
func (i *dbIter) First() bool {
if i.err != nil {
return false
} else if i.dir == dirReleased {
i.err = ErrIterReleased
return false
}
if i.iter.First() {
i.dir = dirSOI
return i.next()
}
i.dir = dirEOI
i.iterErr()
return false
}
func (i *dbIter) Last() bool {
if i.err != nil {
return false
} else if i.dir == dirReleased {
i.err = ErrIterReleased
return false
}
if i.iter.Last() {
return i.prev()
}
i.dir = dirSOI
i.iterErr()
return false
}
func (i *dbIter) Seek(key []byte) bool {
if i.err != nil {
return false
} else if i.dir == dirReleased {
i.err = ErrIterReleased
return false
}
ikey := makeInternalKey(nil, key, i.seq, keyTypeSeek)
if i.iter.Seek(ikey) {
i.dir = dirSOI
return i.next()
}
i.dir = dirEOI
i.iterErr()
return false
}
func (i *dbIter) next() bool {
for {
if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
i.sampleSeek()
if seq <= i.seq {
switch kt {
case keyTypeDel:
// Skip deleted key.
i.key = append(i.key[:0], ukey...)
i.dir = dirForward
case keyTypeVal:
if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 {
i.key = append(i.key[:0], ukey...)
i.value = append(i.value[:0], i.iter.Value()...)
i.dir = dirForward
return true
}
}
}
} else if i.strict {
i.setErr(kerr)
break
}
if !i.iter.Next() {
i.dir = dirEOI
i.iterErr()
break
}
}
return false
}
func (i *dbIter) Next() bool {
if i.dir == dirEOI || i.err != nil {
return false
} else if i.dir == dirReleased {
i.err = ErrIterReleased
return false
}
if !i.iter.Next() || (i.dir == dirBackward && !i.iter.Next()) {
i.dir = dirEOI
i.iterErr()
return false
}
return i.next()
}
func (i *dbIter) prev() bool {
i.dir = dirBackward
del := true
if i.iter.Valid() {
for {
if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
i.sampleSeek()
if seq <= i.seq {
if !del && i.icmp.uCompare(ukey, i.key) < 0 {
return true
}
del = (kt == keyTypeDel)
if !del {
i.key = append(i.key[:0], ukey...)
i.value = append(i.value[:0], i.iter.Value()...)
}
}
} else if i.strict {
i.setErr(kerr)
return false
}
if !i.iter.Prev() {
break
}
}
}
if del {
i.dir = dirSOI
i.iterErr()
return false
}
return true
}
func (i *dbIter) Prev() bool {
if i.dir == dirSOI || i.err != nil {
return false
} else if i.dir == dirReleased {
i.err = ErrIterReleased
return false
}
switch i.dir {
case dirEOI:
return i.Last()
case dirForward:
for i.iter.Prev() {
if ukey, _, _, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
i.sampleSeek()
if i.icmp.uCompare(ukey, i.key) < 0 {
goto cont
}
} else if i.strict {
i.setErr(kerr)
return false
}
}
i.dir = dirSOI
i.iterErr()
return false
}
cont:
return i.prev()
}
func (i *dbIter) Key() []byte {
if i.err != nil || i.dir <= dirEOI {
return nil
}
return i.key
}
func (i *dbIter) Value() []byte {
if i.err != nil || i.dir <= dirEOI {
return nil
}
return i.value
}
func (i *dbIter) Release() {
if i.dir != dirReleased {
// Clear the finalizer.
runtime.SetFinalizer(i, nil)
if i.releaser != nil {
i.releaser.Release()
i.releaser = nil
}
i.dir = dirReleased
i.key = nil
i.value = nil
i.iter.Release()
i.iter = nil
atomic.AddInt32(&i.db.aliveIters, -1)
i.db = nil
}
}
func (i *dbIter) SetReleaser(releaser util.Releaser) {
if i.dir == dirReleased {
panic(util.ErrReleased)
}
if i.releaser != nil && releaser != nil {
panic(util.ErrHasReleaser)
}
i.releaser = releaser
}
func (i *dbIter) Error() error {
return i.err
}
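
The exported counterpart of this iterator state machine is DB.NewIterator; a sketch assuming an already-open *leveldb.DB:

package demo

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
)

// dumpAll walks the whole key space through the exported iterator API,
// which is backed by the dbIter machinery above.
func dumpAll(db *leveldb.DB) error {
	iter := db.NewIterator(nil, nil) // nil range and nil read options: full DB, defaults
	defer iter.Release()             // required; this also decrements aliveIters
	for iter.Next() {
		fmt.Printf("%s = %s\n", iter.Key(), iter.Value())
	}
	return iter.Error() // deferred iteration errors surface here
}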

187
vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go generated vendored Normal file

@ -0,0 +1,187 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"container/list"
"fmt"
"runtime"
"sync"
"sync/atomic"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
)
type snapshotElement struct {
seq uint64
ref int
e *list.Element
}
// Acquires a snapshot, based on latest sequence.
func (db *DB) acquireSnapshot() *snapshotElement {
db.snapsMu.Lock()
defer db.snapsMu.Unlock()
seq := db.getSeq()
if e := db.snapsList.Back(); e != nil {
se := e.Value.(*snapshotElement)
if se.seq == seq {
se.ref++
return se
} else if seq < se.seq {
panic("leveldb: sequence number is not increasing")
}
}
se := &snapshotElement{seq: seq, ref: 1}
se.e = db.snapsList.PushBack(se)
return se
}
// Releases given snapshot element.
func (db *DB) releaseSnapshot(se *snapshotElement) {
db.snapsMu.Lock()
defer db.snapsMu.Unlock()
se.ref--
if se.ref == 0 {
db.snapsList.Remove(se.e)
se.e = nil
} else if se.ref < 0 {
panic("leveldb: Snapshot: negative element reference")
}
}
// Gets the minimum sequence number still referenced by a snapshot, or the current sequence if there are no snapshots.
func (db *DB) minSeq() uint64 {
db.snapsMu.Lock()
defer db.snapsMu.Unlock()
if e := db.snapsList.Front(); e != nil {
return e.Value.(*snapshotElement).seq
}
return db.getSeq()
}
// Snapshot is a DB snapshot.
type Snapshot struct {
db *DB
elem *snapshotElement
mu sync.RWMutex
released bool
}
// Creates new snapshot object.
func (db *DB) newSnapshot() *Snapshot {
snap := &Snapshot{
db: db,
elem: db.acquireSnapshot(),
}
atomic.AddInt32(&db.aliveSnaps, 1)
runtime.SetFinalizer(snap, (*Snapshot).Release)
return snap
}
func (snap *Snapshot) String() string {
return fmt.Sprintf("leveldb.Snapshot{%d}", snap.elem.seq)
}
// Get gets the value for the given key. It returns ErrNotFound if
// the DB does not contain the key.
//
// The caller should not modify the contents of the returned slice, but
// it is safe to modify the contents of the argument after Get returns.
func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
err = snap.db.ok()
if err != nil {
return
}
snap.mu.RLock()
defer snap.mu.RUnlock()
if snap.released {
err = ErrSnapshotReleased
return
}
return snap.db.get(nil, nil, key, snap.elem.seq, ro)
}
// Has returns true if the DB contains the given key.
//
// It is safe to modify the contents of the argument after Has returns.
func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
err = snap.db.ok()
if err != nil {
return
}
snap.mu.RLock()
defer snap.mu.RUnlock()
if snap.released {
err = ErrSnapshotReleased
return
}
return snap.db.has(nil, nil, key, snap.elem.seq, ro)
}
// NewIterator returns an iterator for the snapshot of the underlying DB.
// The returned iterator is not safe for concurrent use, but it is safe to use
// multiple iterators concurrently, with each in a dedicated goroutine.
// It is also safe to use an iterator concurrently with modifying its
// underlying DB. The resultant key/value pairs are guaranteed to be
// consistent.
//
// Slice allows slicing the iterator to only contain keys in the given
// range. A nil Range.Start is treated as a key before all keys in the
// DB. And a nil Range.Limit is treated as a key after all keys in
// the DB.
//
// WARNING: Any slice returned by the iterator (e.g. a slice returned by calling
// the Iterator.Key() or Iterator.Value() methods) should not have its contents
// modified unless noted otherwise.
//
// The iterator must be released after use, by calling Release method.
// Releasing the snapshot doesn't release the iterator too; the
// iterator will still be valid until released.
//
// Also read Iterator documentation of the leveldb/iterator package.
func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
if err := snap.db.ok(); err != nil {
return iterator.NewEmptyIterator(err)
}
snap.mu.Lock()
defer snap.mu.Unlock()
if snap.released {
return iterator.NewEmptyIterator(ErrSnapshotReleased)
}
// Since the iterator already holds a version ref, it doesn't need to
// hold a snapshot ref.
return snap.db.newIterator(nil, nil, snap.elem.seq, slice, ro)
}
// Release releases the snapshot. This will not release any returned
// iterators; the iterators will still be valid until released or the
// underlying DB is closed.
//
// Other methods should not be called after the snapshot has been released.
func (snap *Snapshot) Release() {
snap.mu.Lock()
defer snap.mu.Unlock()
if !snap.released {
// Clear the finalizer.
runtime.SetFinalizer(snap, nil)
snap.released = true
snap.db.releaseSnapshot(snap.elem)
atomic.AddInt32(&snap.db.aliveSnaps, -1)
snap.db = nil
snap.elem = nil
}
}
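The public entry point to this machinery is DB.GetSnapshot, which wraps newSnapshot. A minimal usage sketch; the database path and key are hypothetical:

package main

import (
    "fmt"
    "log"

    "github.com/syndtr/goleveldb/leveldb"
)

func main() {
    db, err := leveldb.OpenFile("path/to/db", nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    snap, err := db.GetSnapshot()
    if err != nil {
        log.Fatal(err)
    }
    // Release decrements the snapshotElement ref acquired above; the
    // finalizer set in newSnapshot is only a safety net.
    defer snap.Release()

    val, err := snap.Get([]byte("key"), nil)
    if err != nil {
        log.Println(err) // leveldb.ErrNotFound if the key is absent
        return
    }
    fmt.Printf("%s\n", val)
}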

vendor/github.com/syndtr/goleveldb/leveldb/db_state.go generated vendored Normal file

@ -0,0 +1,239 @@
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"errors"
"sync/atomic"
"time"
"github.com/syndtr/goleveldb/leveldb/journal"
"github.com/syndtr/goleveldb/leveldb/memdb"
"github.com/syndtr/goleveldb/leveldb/storage"
)
var (
errHasFrozenMem = errors.New("has frozen mem")
)
type memDB struct {
db *DB
*memdb.DB
ref int32
}
func (m *memDB) getref() int32 {
return atomic.LoadInt32(&m.ref)
}
func (m *memDB) incref() {
atomic.AddInt32(&m.ref, 1)
}
func (m *memDB) decref() {
if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
// Only put back memdb with std capacity.
if m.Capacity() == m.db.s.o.GetWriteBuffer() {
m.Reset()
m.db.mpoolPut(m.DB)
}
m.db = nil
m.DB = nil
} else if ref < 0 {
panic("negative memdb ref")
}
}
// Get latest sequence number.
func (db *DB) getSeq() uint64 {
return atomic.LoadUint64(&db.seq)
}
// Atomically adds delta to seq.
func (db *DB) addSeq(delta uint64) {
atomic.AddUint64(&db.seq, delta)
}
func (db *DB) setSeq(seq uint64) {
atomic.StoreUint64(&db.seq, seq)
}
func (db *DB) sampleSeek(ikey internalKey) {
v := db.s.version()
if v.sampleSeek(ikey) {
// Trigger table compaction.
db.compTrigger(db.tcompCmdC)
}
v.release()
}
func (db *DB) mpoolPut(mem *memdb.DB) {
if !db.isClosed() {
select {
case db.memPool <- mem:
default:
}
}
}
func (db *DB) mpoolGet(n int) *memDB {
var mdb *memdb.DB
select {
case mdb = <-db.memPool:
default:
}
if mdb == nil || mdb.Capacity() < n {
mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
}
return &memDB{
db: db,
DB: mdb,
}
}
func (db *DB) mpoolDrain() {
ticker := time.NewTicker(30 * time.Second)
for {
select {
case <-ticker.C:
select {
case <-db.memPool:
default:
}
case <-db.closeC:
ticker.Stop()
// Make sure the pool is drained.
select {
case <-db.memPool:
case <-time.After(time.Second):
}
close(db.memPool)
return
}
}
}
// Create a new memdb and freeze the old one; needs external synchronization.
// newMem is only called synchronously by the writer.
func (db *DB) newMem(n int) (mem *memDB, err error) {
fd := storage.FileDesc{Type: storage.TypeJournal, Num: db.s.allocFileNum()}
w, err := db.s.stor.Create(fd)
if err != nil {
db.s.reuseFileNum(fd.Num)
return
}
db.memMu.Lock()
defer db.memMu.Unlock()
if db.frozenMem != nil {
return nil, errHasFrozenMem
}
if db.journal == nil {
db.journal = journal.NewWriter(w)
} else {
db.journal.Reset(w)
db.journalWriter.Close()
db.frozenJournalFd = db.journalFd
}
db.journalWriter = w
db.journalFd = fd
db.frozenMem = db.mem
mem = db.mpoolGet(n)
mem.incref() // for self
mem.incref() // for caller
db.mem = mem
// The seq is only incremented by the writer. And whoever called newMem
// should hold the write lock, so no additional synchronization is needed here.
db.frozenSeq = db.seq
return
}
// Get all memdbs.
func (db *DB) getMems() (e, f *memDB) {
db.memMu.RLock()
defer db.memMu.RUnlock()
if db.mem != nil {
db.mem.incref()
} else if !db.isClosed() {
panic("nil effective mem")
}
if db.frozenMem != nil {
db.frozenMem.incref()
}
return db.mem, db.frozenMem
}
// Get effective memdb.
func (db *DB) getEffectiveMem() *memDB {
db.memMu.RLock()
defer db.memMu.RUnlock()
if db.mem != nil {
db.mem.incref()
} else if !db.isClosed() {
panic("nil effective mem")
}
return db.mem
}
// Check whether we have a frozen memdb.
func (db *DB) hasFrozenMem() bool {
db.memMu.RLock()
defer db.memMu.RUnlock()
return db.frozenMem != nil
}
// Get frozen memdb.
func (db *DB) getFrozenMem() *memDB {
db.memMu.RLock()
defer db.memMu.RUnlock()
if db.frozenMem != nil {
db.frozenMem.incref()
}
return db.frozenMem
}
// Drop frozen memdb; assume that frozen memdb isn't nil.
func (db *DB) dropFrozenMem() {
db.memMu.Lock()
if err := db.s.stor.Remove(db.frozenJournalFd); err != nil {
db.logf("journal@remove removing @%d %q", db.frozenJournalFd.Num, err)
} else {
db.logf("journal@remove removed @%d", db.frozenJournalFd.Num)
}
db.frozenJournalFd = storage.FileDesc{}
db.frozenMem.decref()
db.frozenMem = nil
db.memMu.Unlock()
}
// Clear mems ptr; used by DB.Close().
func (db *DB) clearMems() {
db.memMu.Lock()
db.mem = nil
db.frozenMem = nil
db.memMu.Unlock()
}
// Set closed flag; return true if not already closed.
func (db *DB) setClosed() bool {
return atomic.CompareAndSwapUint32(&db.closed, 0, 1)
}
// Check whether DB was closed.
func (db *DB) isClosed() bool {
return atomic.LoadUint32(&db.closed) != 0
}
// Check read ok status.
func (db *DB) ok() error {
if db.isClosed() {
return ErrClosed
}
return nil
}
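memDB's incref/decref above is a small atomic reference-counting scheme: the last decref returns the memdb to the pool, and a negative count panics as a bookkeeping bug. A standalone sketch of the same pattern; refCounted is a hypothetical type, not part of the library:

package main

import (
    "fmt"
    "sync/atomic"
)

// refCounted mirrors memDB's incref/decref: the final decref (count
// reaches zero) frees the resource, and a negative count is a bug.
type refCounted struct {
    ref     int32
    release func()
}

func (r *refCounted) incref() { atomic.AddInt32(&r.ref, 1) }

func (r *refCounted) decref() {
    if ref := atomic.AddInt32(&r.ref, -1); ref == 0 {
        r.release()
    } else if ref < 0 {
        panic("negative ref")
    }
}

func main() {
    rc := &refCounted{ref: 1, release: func() { fmt.Println("released") }}
    rc.incref() // second owner, e.g. an iterator
    rc.decref() // first owner done; still referenced
    rc.decref() // last reference gone, prints "released"
}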


@ -0,0 +1,329 @@
// Copyright (c) 2016, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"errors"
"sync"
"time"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
)
var errTransactionDone = errors.New("leveldb: transaction already closed")
// Transaction is the transaction handle.
type Transaction struct {
db *DB
lk sync.RWMutex
seq uint64
mem *memDB
tables tFiles
ikScratch []byte
rec sessionRecord
stats cStatStaging
closed bool
}
// Get gets the value for the given key. It returns ErrNotFound if the
// DB does not contain the key.
//
// The returned slice is its own copy; it is safe to modify the contents
// of the returned slice.
// It is safe to modify the contents of the argument after Get returns.
func (tr *Transaction) Get(key []byte, ro *opt.ReadOptions) ([]byte, error) {
tr.lk.RLock()
defer tr.lk.RUnlock()
if tr.closed {
return nil, errTransactionDone
}
return tr.db.get(tr.mem.DB, tr.tables, key, tr.seq, ro)
}
// Has returns true if the DB contains the given key.
//
// It is safe to modify the contents of the argument after Has returns.
func (tr *Transaction) Has(key []byte, ro *opt.ReadOptions) (bool, error) {
tr.lk.RLock()
defer tr.lk.RUnlock()
if tr.closed {
return false, errTransactionDone
}
return tr.db.has(tr.mem.DB, tr.tables, key, tr.seq, ro)
}
// NewIterator returns an iterator for the latest snapshot of the transaction.
// The returned iterator is not safe for concurrent use, but it is safe to use
// multiple iterators concurrently, with each in a dedicated goroutine.
// It is also safe to use an iterator concurrently with writes to the
// transaction. The resultant key/value pairs are guaranteed to be consistent.
//
// Slice allows slicing the iterator to only contain keys in the given
// range. A nil Range.Start is treated as a key before all keys in the
// DB. And a nil Range.Limit is treated as a key after all keys in
// the DB.
//
// WARNING: Any slice returned by the iterator (e.g. a slice returned by calling
// the Iterator.Key() or Iterator.Value() methods) should not have its contents
// modified unless noted otherwise.
//
// The iterator must be released after use, by calling Release method.
//
// Also read Iterator documentation of the leveldb/iterator package.
func (tr *Transaction) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
tr.lk.RLock()
defer tr.lk.RUnlock()
if tr.closed {
return iterator.NewEmptyIterator(errTransactionDone)
}
tr.mem.incref()
return tr.db.newIterator(tr.mem, tr.tables, tr.seq, slice, ro)
}
func (tr *Transaction) flush() error {
// Flush memdb.
if tr.mem.Len() != 0 {
tr.stats.startTimer()
iter := tr.mem.NewIterator(nil)
t, n, err := tr.db.s.tops.createFrom(iter)
iter.Release()
tr.stats.stopTimer()
if err != nil {
return err
}
if tr.mem.getref() == 1 {
tr.mem.Reset()
} else {
tr.mem.decref()
tr.mem = tr.db.mpoolGet(0)
tr.mem.incref()
}
tr.tables = append(tr.tables, t)
tr.rec.addTableFile(0, t)
tr.stats.write += t.size
tr.db.logf("transaction@flush created L0@%d N·%d S·%s %q:%q", t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax)
}
return nil
}
func (tr *Transaction) put(kt keyType, key, value []byte) error {
tr.ikScratch = makeInternalKey(tr.ikScratch, key, tr.seq+1, kt)
if tr.mem.Free() < len(tr.ikScratch)+len(value) {
if err := tr.flush(); err != nil {
return err
}
}
if err := tr.mem.Put(tr.ikScratch, value); err != nil {
return err
}
tr.seq++
return nil
}
// Put sets the value for the given key. It overwrites any previous value
// for that key; a DB is not a multi-map.
// Please note that the transaction is not compacted until committed, so if you
// write the same key 10 times, all 10 entries remain in the transaction.
//
// It is safe to modify the contents of the arguments after Put returns.
func (tr *Transaction) Put(key, value []byte, wo *opt.WriteOptions) error {
tr.lk.Lock()
defer tr.lk.Unlock()
if tr.closed {
return errTransactionDone
}
return tr.put(keyTypeVal, key, value)
}
// Delete deletes the value for the given key.
// Please note that the transaction is not compacted until committed, so if you
// write the same key 10 times, all 10 entries remain in the transaction.
//
// It is safe to modify the contents of the arguments after Delete returns.
func (tr *Transaction) Delete(key []byte, wo *opt.WriteOptions) error {
tr.lk.Lock()
defer tr.lk.Unlock()
if tr.closed {
return errTransactionDone
}
return tr.put(keyTypeDel, key, nil)
}
// Write applies the given batch to the transaction. The batch will be applied
// sequentially.
// Please note that the transaction is not compacted until committed, so if you
// write the same key 10 times, all 10 entries remain in the transaction.
//
// It is safe to modify the contents of the arguments after Write returns.
func (tr *Transaction) Write(b *Batch, wo *opt.WriteOptions) error {
if b == nil || b.Len() == 0 {
return nil
}
tr.lk.Lock()
defer tr.lk.Unlock()
if tr.closed {
return errTransactionDone
}
return b.replayInternal(func(i int, kt keyType, k, v []byte) error {
return tr.put(kt, k, v)
})
}
func (tr *Transaction) setDone() {
tr.closed = true
tr.db.tr = nil
tr.mem.decref()
<-tr.db.writeLockC
}
// Commit commits the transaction. If the returned error is not nil, the
// transaction is not committed; it can then either be retried or discarded.
//
// Other methods should not be called after transaction has been committed.
func (tr *Transaction) Commit() error {
if err := tr.db.ok(); err != nil {
return err
}
tr.lk.Lock()
defer tr.lk.Unlock()
if tr.closed {
return errTransactionDone
}
if err := tr.flush(); err != nil {
// Return the error and let the user decide whether to retry or discard the
// transaction.
return err
}
if len(tr.tables) != 0 {
// Committing transaction.
tr.rec.setSeqNum(tr.seq)
tr.db.compCommitLk.Lock()
tr.stats.startTimer()
var cerr error
for retry := 0; retry < 3; retry++ {
cerr = tr.db.s.commit(&tr.rec)
if cerr != nil {
tr.db.logf("transaction@commit error R·%d %q", retry, cerr)
select {
case <-time.After(time.Second):
case <-tr.db.closeC:
tr.db.logf("transaction@commit exiting")
tr.db.compCommitLk.Unlock()
return cerr
}
} else {
// Success. Set db.seq.
tr.db.setSeq(tr.seq)
break
}
}
tr.stats.stopTimer()
if cerr != nil {
// Return the error and let the user decide whether to retry or discard the
// transaction.
return cerr
}
// Update compaction stats. This is safe as long as we hold compCommitLk.
tr.db.compStats.addStat(0, &tr.stats)
// Trigger table auto-compaction.
tr.db.compTrigger(tr.db.tcompCmdC)
tr.db.compCommitLk.Unlock()
// Additionally, wait for compaction when a certain threshold is reached.
// Ignore the error; an error is returned only if the transaction can't be committed.
tr.db.waitCompaction()
}
// Only mark as done if transaction committed successfully.
tr.setDone()
return nil
}
func (tr *Transaction) discard() {
// Discard transaction.
for _, t := range tr.tables {
tr.db.logf("transaction@discard @%d", t.fd.Num)
if err1 := tr.db.s.stor.Remove(t.fd); err1 == nil {
tr.db.s.reuseFileNum(t.fd.Num)
}
}
}
// Discard discards the transaction.
//
// Other methods should not be called after transaction has been discarded.
func (tr *Transaction) Discard() {
tr.lk.Lock()
if !tr.closed {
tr.discard()
tr.setDone()
}
tr.lk.Unlock()
}
func (db *DB) waitCompaction() error {
if db.s.tLen(0) >= db.s.o.GetWriteL0PauseTrigger() {
return db.compTriggerWait(db.tcompCmdC)
}
return nil
}
// OpenTransaction opens an atomic DB transaction. Only one transaction can be
// opened at a time. Subsequent calls to Write and OpenTransaction will be blocked
// until the in-flight transaction is committed or discarded.
// The returned transaction handle is safe for concurrent use.
//
// Transaction is expensive and can overwhelm compaction, especially if
// transaction size is small. Use with caution.
//
// The transaction must be closed once done, either by committing or discarding
// the transaction.
// Closing the DB will discard any open transaction.
func (db *DB) OpenTransaction() (*Transaction, error) {
if err := db.ok(); err != nil {
return nil, err
}
// The write happens synchronously.
select {
case db.writeLockC <- struct{}{}:
case err := <-db.compPerErrC:
return nil, err
case <-db.closeC:
return nil, ErrClosed
}
if db.tr != nil {
panic("leveldb: has open transaction")
}
// Flush current memdb.
if db.mem != nil && db.mem.Len() != 0 {
if _, err := db.rotateMem(0, true); err != nil {
return nil, err
}
}
// Wait for compaction when a certain threshold is reached.
if err := db.waitCompaction(); err != nil {
return nil, err
}
tr := &Transaction{
db: db,
seq: db.seq,
mem: db.mpoolGet(0),
}
tr.mem.incref()
db.tr = tr
return tr, nil
}
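A minimal usage sketch of the transaction API above. Exactly one of Commit or Discard must be called so that setDone releases the write lock; the database path and keys are hypothetical:

package main

import (
    "log"

    "github.com/syndtr/goleveldb/leveldb"
)

func main() {
    db, err := leveldb.OpenFile("path/to/db", nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    tr, err := db.OpenTransaction()
    if err != nil {
        log.Fatal(err)
    }
    if err := tr.Put([]byte("k1"), []byte("v1"), nil); err != nil {
        tr.Discard() // releases the write lock via setDone
        log.Fatal(err)
    }
    if err := tr.Commit(); err != nil {
        log.Fatal(err) // not committed; could also be retried
    }
}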

vendor/github.com/syndtr/goleveldb/leveldb/db_util.go generated vendored Normal file

@ -0,0 +1,102 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
"github.com/syndtr/goleveldb/leveldb/util"
)
// Reader is the interface that wraps basic Get and NewIterator methods.
// This interface is implemented by both DB and Snapshot.
type Reader interface {
Get(key []byte, ro *opt.ReadOptions) (value []byte, err error)
NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator
}
// Sizes is a list of sizes.
type Sizes []int64
// Sum returns the sum of the sizes.
func (sizes Sizes) Sum() int64 {
var sum int64
for _, size := range sizes {
sum += size
}
return sum
}
// Logging.
func (db *DB) log(v ...interface{}) { db.s.log(v...) }
func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) }
// Check and clean files.
func (db *DB) checkAndCleanFiles() error {
v := db.s.version()
defer v.release()
tmap := make(map[int64]bool)
for _, tables := range v.levels {
for _, t := range tables {
tmap[t.fd.Num] = false
}
}
fds, err := db.s.stor.List(storage.TypeAll)
if err != nil {
return err
}
var nt int
var rem []storage.FileDesc
for _, fd := range fds {
keep := true
switch fd.Type {
case storage.TypeManifest:
keep = fd.Num >= db.s.manifestFd.Num
case storage.TypeJournal:
if !db.frozenJournalFd.Zero() {
keep = fd.Num >= db.frozenJournalFd.Num
} else {
keep = fd.Num >= db.journalFd.Num
}
case storage.TypeTable:
_, keep = tmap[fd.Num]
if keep {
tmap[fd.Num] = true
nt++
}
}
if !keep {
rem = append(rem, fd)
}
}
if nt != len(tmap) {
var mfds []storage.FileDesc
for num, present := range tmap {
if !present {
mfds = append(mfds, storage.FileDesc{Type: storage.TypeTable, Num: num})
db.logf("db@janitor table missing @%d", num)
}
}
return errors.NewErrCorrupted(storage.FileDesc{}, &errors.ErrMissingFiles{Fds: mfds})
}
db.logf("db@janitor F·%d G·%d", len(fds), len(rem))
for _, fd := range rem {
db.logf("db@janitor removing %s-%d", fd.Type, fd.Num)
if err := db.s.stor.Remove(fd); err != nil {
return err
}
}
return nil
}

vendor/github.com/syndtr/goleveldb/leveldb/db_write.go generated vendored Normal file

@ -0,0 +1,464 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"sync/atomic"
"time"
"github.com/syndtr/goleveldb/leveldb/memdb"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
)
func (db *DB) writeJournal(batches []*Batch, seq uint64, sync bool) error {
wr, err := db.journal.Next()
if err != nil {
return err
}
if err := writeBatchesWithHeader(wr, batches, seq); err != nil {
return err
}
if err := db.journal.Flush(); err != nil {
return err
}
if sync {
return db.journalWriter.Sync()
}
return nil
}
func (db *DB) rotateMem(n int, wait bool) (mem *memDB, err error) {
retryLimit := 3
retry:
// Wait for pending memdb compaction.
err = db.compTriggerWait(db.mcompCmdC)
if err != nil {
return
}
retryLimit--
// Create new memdb and journal.
mem, err = db.newMem(n)
if err != nil {
if err == errHasFrozenMem {
if retryLimit <= 0 {
panic("BUG: still has frozen memdb")
}
goto retry
}
return
}
// Schedule memdb compaction.
if wait {
err = db.compTriggerWait(db.mcompCmdC)
} else {
db.compTrigger(db.mcompCmdC)
}
return
}
func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) {
delayed := false
slowdownTrigger := db.s.o.GetWriteL0SlowdownTrigger()
pauseTrigger := db.s.o.GetWriteL0PauseTrigger()
flush := func() (retry bool) {
mdb = db.getEffectiveMem()
if mdb == nil {
err = ErrClosed
return false
}
defer func() {
if retry {
mdb.decref()
mdb = nil
}
}()
tLen := db.s.tLen(0)
mdbFree = mdb.Free()
switch {
case tLen >= slowdownTrigger && !delayed:
delayed = true
time.Sleep(time.Millisecond)
case mdbFree >= n:
return false
case tLen >= pauseTrigger:
delayed = true
// Set the write paused flag explicitly.
atomic.StoreInt32(&db.inWritePaused, 1)
err = db.compTriggerWait(db.tcompCmdC)
// Unset the write paused flag.
atomic.StoreInt32(&db.inWritePaused, 0)
if err != nil {
return false
}
default:
// Allow memdb to grow if it has no entry.
if mdb.Len() == 0 {
mdbFree = n
} else {
mdb.decref()
mdb, err = db.rotateMem(n, false)
if err == nil {
mdbFree = mdb.Free()
} else {
mdbFree = 0
}
}
return false
}
return true
}
start := time.Now()
for flush() {
}
if delayed {
db.writeDelay += time.Since(start)
db.writeDelayN++
} else if db.writeDelayN > 0 {
db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
atomic.AddInt32(&db.cWriteDelayN, int32(db.writeDelayN))
atomic.AddInt64(&db.cWriteDelay, int64(db.writeDelay))
db.writeDelay = 0
db.writeDelayN = 0
}
return
}
type writeMerge struct {
sync bool
batch *Batch
keyType keyType
key, value []byte
}
func (db *DB) unlockWrite(overflow bool, merged int, err error) {
for i := 0; i < merged; i++ {
db.writeAckC <- err
}
if overflow {
// Pass lock to the next write (that failed to merge).
db.writeMergedC <- false
} else {
// Release lock.
<-db.writeLockC
}
}
// ourBatch is a batch that we can modify.
func (db *DB) writeLocked(batch, ourBatch *Batch, merge, sync bool) error {
// Try to flush the memdb. This method also tries to throttle writes
// if they are too fast and compaction cannot catch up.
mdb, mdbFree, err := db.flush(batch.internalLen)
if err != nil {
db.unlockWrite(false, 0, err)
return err
}
defer mdb.decref()
var (
overflow bool
merged int
batches = []*Batch{batch}
)
if merge {
// Merge limit.
var mergeLimit int
if batch.internalLen > 128<<10 {
mergeLimit = (1 << 20) - batch.internalLen
} else {
mergeLimit = 128 << 10
}
mergeCap := mdbFree - batch.internalLen
if mergeLimit > mergeCap {
mergeLimit = mergeCap
}
merge:
for mergeLimit > 0 {
select {
case incoming := <-db.writeMergeC:
if incoming.batch != nil {
// Merge batch.
if incoming.batch.internalLen > mergeLimit {
overflow = true
break merge
}
batches = append(batches, incoming.batch)
mergeLimit -= incoming.batch.internalLen
} else {
// Merge put.
internalLen := len(incoming.key) + len(incoming.value) + 8
if internalLen > mergeLimit {
overflow = true
break merge
}
if ourBatch == nil {
ourBatch = db.batchPool.Get().(*Batch)
ourBatch.Reset()
batches = append(batches, ourBatch)
}
// We can use the same batch since concurrent writes don't
// guarantee write order.
ourBatch.appendRec(incoming.keyType, incoming.key, incoming.value)
mergeLimit -= internalLen
}
sync = sync || incoming.sync
merged++
db.writeMergedC <- true
default:
break merge
}
}
}
// Release ourBatch if any.
if ourBatch != nil {
defer db.batchPool.Put(ourBatch)
}
// Seq number.
seq := db.seq + 1
// Write journal.
if err := db.writeJournal(batches, seq, sync); err != nil {
db.unlockWrite(overflow, merged, err)
return err
}
// Put batches.
for _, batch := range batches {
if err := batch.putMem(seq, mdb.DB); err != nil {
panic(err)
}
seq += uint64(batch.Len())
}
// Incr seq number.
db.addSeq(uint64(batchesLen(batches)))
// Rotate the memdb if it has reached the threshold.
if batch.internalLen >= mdbFree {
db.rotateMem(0, false)
}
db.unlockWrite(overflow, merged, nil)
return nil
}
// Write applies the given batch to the DB. The batch records will be applied
// sequentially. Write may be used concurrently; when used concurrently and the
// batch is small enough, Write will try to merge the batches. Set the NoWriteMerge
// option to true to disable write merging.
//
// It is safe to modify the contents of the arguments after Write returns but
// not before. Write will not modify content of the batch.
func (db *DB) Write(batch *Batch, wo *opt.WriteOptions) error {
if err := db.ok(); err != nil || batch == nil || batch.Len() == 0 {
return err
}
// If the batch size is larger than the write buffer, it may be justified to write
// using a transaction instead. Using a transaction, the batch will be written
// into tables directly, skipping the journaling.
if batch.internalLen > db.s.o.GetWriteBuffer() && !db.s.o.GetDisableLargeBatchTransaction() {
tr, err := db.OpenTransaction()
if err != nil {
return err
}
if err := tr.Write(batch, wo); err != nil {
tr.Discard()
return err
}
return tr.Commit()
}
merge := !wo.GetNoWriteMerge() && !db.s.o.GetNoWriteMerge()
sync := wo.GetSync() && !db.s.o.GetNoSync()
// Acquire write lock.
if merge {
select {
case db.writeMergeC <- writeMerge{sync: sync, batch: batch}:
if <-db.writeMergedC {
// Write is merged.
return <-db.writeAckC
}
// Write is not merged, the write lock is handed to us. Continue.
case db.writeLockC <- struct{}{}:
// Write lock acquired.
case err := <-db.compPerErrC:
// Compaction error.
return err
case <-db.closeC:
// Closed
return ErrClosed
}
} else {
select {
case db.writeLockC <- struct{}{}:
// Write lock acquired.
case err := <-db.compPerErrC:
// Compaction error.
return err
case <-db.closeC:
// Closed
return ErrClosed
}
}
return db.writeLocked(batch, nil, merge, sync)
}
func (db *DB) putRec(kt keyType, key, value []byte, wo *opt.WriteOptions) error {
if err := db.ok(); err != nil {
return err
}
merge := !wo.GetNoWriteMerge() && !db.s.o.GetNoWriteMerge()
sync := wo.GetSync() && !db.s.o.GetNoSync()
// Acquire write lock.
if merge {
select {
case db.writeMergeC <- writeMerge{sync: sync, keyType: kt, key: key, value: value}:
if <-db.writeMergedC {
// Write is merged.
return <-db.writeAckC
}
// Write is not merged, the write lock is handed to us. Continue.
case db.writeLockC <- struct{}{}:
// Write lock acquired.
case err := <-db.compPerErrC:
// Compaction error.
return err
case <-db.closeC:
// Closed
return ErrClosed
}
} else {
select {
case db.writeLockC <- struct{}{}:
// Write lock acquired.
case err := <-db.compPerErrC:
// Compaction error.
return err
case <-db.closeC:
// Closed
return ErrClosed
}
}
batch := db.batchPool.Get().(*Batch)
batch.Reset()
batch.appendRec(kt, key, value)
return db.writeLocked(batch, batch, merge, sync)
}
// Put sets the value for the given key. It overwrites any previous value
// for that key; a DB is not a multi-map. Write merge also applies for Put, see
// Write.
//
// It is safe to modify the contents of the arguments after Put returns but not
// before.
func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error {
return db.putRec(keyTypeVal, key, value, wo)
}
// Delete deletes the value for the given key. Delete will not return an error if
// the key doesn't exist. Write merge also applies for Delete, see Write.
//
// It is safe to modify the contents of the arguments after Delete returns but
// not before.
func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error {
return db.putRec(keyTypeDel, key, nil, wo)
}
func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool {
iter := mem.NewIterator(nil)
defer iter.Release()
return (max == nil || (iter.First() && icmp.uCompare(max, internalKey(iter.Key()).ukey()) >= 0)) &&
(min == nil || (iter.Last() && icmp.uCompare(min, internalKey(iter.Key()).ukey()) <= 0))
}
// CompactRange compacts the underlying DB for the given key range.
// In particular, deleted and overwritten versions are discarded,
// and the data is rearranged to reduce the cost of operations
// needed to access the data. This operation should typically only
// be invoked by users who understand the underlying implementation.
//
// A nil Range.Start is treated as a key before all keys in the DB.
// And a nil Range.Limit is treated as a key after all keys in the DB.
// Therefore if both are nil it will compact the entire DB.
func (db *DB) CompactRange(r util.Range) error {
if err := db.ok(); err != nil {
return err
}
// Lock writer.
select {
case db.writeLockC <- struct{}{}:
case err := <-db.compPerErrC:
return err
case <-db.closeC:
return ErrClosed
}
// Check for overlaps in memdb.
mdb := db.getEffectiveMem()
if mdb == nil {
return ErrClosed
}
defer mdb.decref()
if isMemOverlaps(db.s.icmp, mdb.DB, r.Start, r.Limit) {
// Memdb compaction.
if _, err := db.rotateMem(0, false); err != nil {
<-db.writeLockC
return err
}
<-db.writeLockC
if err := db.compTriggerWait(db.mcompCmdC); err != nil {
return err
}
} else {
<-db.writeLockC
}
// Table compaction.
return db.compTriggerRange(db.tcompCmdC, -1, r.Start, r.Limit)
}
// SetReadOnly makes DB read-only. It will stay read-only until reopened.
func (db *DB) SetReadOnly() error {
if err := db.ok(); err != nil {
return err
}
// Lock writer.
select {
case db.writeLockC <- struct{}{}:
db.compWriteLocking = true
case err := <-db.compPerErrC:
return err
case <-db.closeC:
return ErrClosed
}
// Set compaction read-only.
select {
case db.compErrSetC <- ErrReadOnly:
case perr := <-db.compPerErrC:
return perr
case <-db.closeC:
return ErrClosed
}
return nil
}
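From the caller's side, the write path above boils down to building a batch and choosing the sync mode. A short sketch; the database path and keys are hypothetical:

package main

import (
    "log"

    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
    db, err := leveldb.OpenFile("path/to/db", nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    batch := new(leveldb.Batch)
    batch.Put([]byte("a"), []byte("1"))
    batch.Delete([]byte("b"))

    // Sync: true makes writeJournal call journalWriter.Sync before
    // returning, trading throughput for durability.
    if err := db.Write(batch, &opt.WriteOptions{Sync: true}); err != nil {
        log.Fatal(err)
    }
}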

vendor/github.com/syndtr/goleveldb/leveldb/doc.go generated vendored Normal file

@ -0,0 +1,92 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package leveldb provides implementation of LevelDB key/value database.
//
// Create or open a database:
//
// // The returned DB instance is safe for concurrent use, which means that all
// // of DB's methods may be called concurrently from multiple goroutines.
// db, err := leveldb.OpenFile("path/to/db", nil)
// ...
// defer db.Close()
// ...
//
// Read or modify the database content:
//
// // Remember that the contents of the returned slice should not be modified.
// data, err := db.Get([]byte("key"), nil)
// ...
// err = db.Put([]byte("key"), []byte("value"), nil)
// ...
// err = db.Delete([]byte("key"), nil)
// ...
//
// Iterate over database content:
//
// iter := db.NewIterator(nil, nil)
// for iter.Next() {
// // Remember that the contents of the returned slice should not be modified, and
// // only valid until the next call to Next.
// key := iter.Key()
// value := iter.Value()
// ...
// }
// iter.Release()
// err = iter.Error()
// ...
//
// Iterate over subset of database content with a particular prefix:
// iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil)
// for iter.Next() {
// // Use key/value.
// ...
// }
// iter.Release()
// err = iter.Error()
// ...
//
// Seek-then-Iterate:
//
// iter := db.NewIterator(nil, nil)
// for ok := iter.Seek(key); ok; ok = iter.Next() {
// // Use key/value.
// ...
// }
// iter.Release()
// err = iter.Error()
// ...
//
// Iterate over subset of database content:
//
// iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil)
// for iter.Next() {
// // Use key/value.
// ...
// }
// iter.Release()
// err = iter.Error()
// ...
//
// Batch writes:
//
// batch := new(leveldb.Batch)
// batch.Put([]byte("foo"), []byte("value"))
// batch.Put([]byte("bar"), []byte("another value"))
// batch.Delete([]byte("baz"))
// err = db.Write(batch, nil)
// ...
//
// Use bloom filter:
//
// o := &opt.Options{
// Filter: filter.NewBloomFilter(10),
// }
// db, err := leveldb.OpenFile("path/to/db", o)
// ...
// defer db.Close()
// ...
package leveldb

vendor/github.com/syndtr/goleveldb/leveldb/errors.go generated vendored Normal file

@ -0,0 +1,20 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"github.com/syndtr/goleveldb/leveldb/errors"
)
// Common errors.
var (
ErrNotFound = errors.ErrNotFound
ErrReadOnly = errors.New("leveldb: read-only mode")
ErrSnapshotReleased = errors.New("leveldb: snapshot released")
ErrIterReleased = errors.New("leveldb: iterator released")
ErrClosed = errors.New("leveldb: closed")
)
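These sentinel errors are compared by identity. A brief sketch of the usual ErrNotFound check; the database path and key are hypothetical:

package main

import (
    "fmt"
    "log"

    "github.com/syndtr/goleveldb/leveldb"
)

func main() {
    db, err := leveldb.OpenFile("path/to/db", nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    val, err := db.Get([]byte("missing"), nil)
    switch err {
    case nil:
        fmt.Printf("value: %s\n", val)
    case leveldb.ErrNotFound:
        fmt.Println("key not present") // a normal condition, not a failure
    default:
        log.Fatal(err)
    }
}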


@ -0,0 +1,78 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package errors provides common error types used throughout leveldb.
package errors
import (
"errors"
"fmt"
"github.com/syndtr/goleveldb/leveldb/storage"
"github.com/syndtr/goleveldb/leveldb/util"
)
// Common errors.
var (
ErrNotFound = New("leveldb: not found")
ErrReleased = util.ErrReleased
ErrHasReleaser = util.ErrHasReleaser
)
// New returns an error that formats as the given text.
func New(text string) error {
return errors.New(text)
}
// ErrCorrupted is the type that wraps errors that indicate corruption in
// the database.
type ErrCorrupted struct {
Fd storage.FileDesc
Err error
}
func (e *ErrCorrupted) Error() string {
if !e.Fd.Zero() {
return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd)
}
return e.Err.Error()
}
// NewErrCorrupted creates a new ErrCorrupted error.
func NewErrCorrupted(fd storage.FileDesc, err error) error {
return &ErrCorrupted{fd, err}
}
// IsCorrupted returns a boolean indicating whether the error indicates
// a corruption.
func IsCorrupted(err error) bool {
switch err.(type) {
case *ErrCorrupted:
return true
case *storage.ErrCorrupted:
return true
}
return false
}
// ErrMissingFiles is the type indicating a corruption due to missing
// files. ErrMissingFiles is always wrapped with ErrCorrupted.
type ErrMissingFiles struct {
Fds []storage.FileDesc
}
func (e *ErrMissingFiles) Error() string { return "file missing" }
// SetFd sets the 'file info' of the given error to the given file.
// Currently only ErrCorrupted is supported; otherwise it does nothing.
func SetFd(err error, fd storage.FileDesc) error {
switch x := err.(type) {
case *ErrCorrupted:
x.Fd = fd
return x
}
return err
}
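A small sketch of how these helpers compose: NewErrCorrupted attaches a file descriptor and IsCorrupted distinguishes corruption from ordinary errors. The file descriptor and messages are made up for illustration:

package main

import (
    "fmt"

    "github.com/syndtr/goleveldb/leveldb/errors"
    "github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
    err := errors.NewErrCorrupted(
        storage.FileDesc{Type: storage.TypeTable, Num: 5},
        errors.New("checksum mismatch"),
    )
    fmt.Println(err)                                     // "checksum mismatch [file=...]"
    fmt.Println(errors.IsCorrupted(err))                 // true
    fmt.Println(errors.IsCorrupted(errors.New("other"))) // false
}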

vendor/github.com/syndtr/goleveldb/leveldb/filter.go generated vendored Normal file

@ -0,0 +1,31 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"github.com/syndtr/goleveldb/leveldb/filter"
)
type iFilter struct {
filter.Filter
}
func (f iFilter) Contains(filter, key []byte) bool {
return f.Filter.Contains(filter, internalKey(key).ukey())
}
func (f iFilter) NewGenerator() filter.FilterGenerator {
return iFilterGenerator{f.Filter.NewGenerator()}
}
type iFilterGenerator struct {
filter.FilterGenerator
}
func (g iFilterGenerator) Add(key []byte) {
g.FilterGenerator.Add(internalKey(key).ukey())
}


@ -0,0 +1,116 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package filter
import (
"github.com/syndtr/goleveldb/leveldb/util"
)
func bloomHash(key []byte) uint32 {
return util.Hash(key, 0xbc9f1d34)
}
type bloomFilter int
// The bloom filter serializes its parameters and is backward compatible
// with respect to them. Therefore, its parameters are not added to its
// name.
func (bloomFilter) Name() string {
return "leveldb.BuiltinBloomFilter"
}
func (f bloomFilter) Contains(filter, key []byte) bool {
nBytes := len(filter) - 1
if nBytes < 1 {
return false
}
nBits := uint32(nBytes * 8)
// Use the encoded k so that we can read filters generated by
// bloom filters created using different parameters.
k := filter[nBytes]
if k > 30 {
// Reserved for potentially new encodings for short bloom filters.
// Consider it a match.
return true
}
kh := bloomHash(key)
delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits
for j := uint8(0); j < k; j++ {
bitpos := kh % nBits
if (uint32(filter[bitpos/8]) & (1 << (bitpos % 8))) == 0 {
return false
}
kh += delta
}
return true
}
func (f bloomFilter) NewGenerator() FilterGenerator {
// Round down to reduce probing cost a little bit.
k := uint8(f * 69 / 100) // 0.69 =~ ln(2)
if k < 1 {
k = 1
} else if k > 30 {
k = 30
}
return &bloomFilterGenerator{
n: int(f),
k: k,
}
}
type bloomFilterGenerator struct {
n int
k uint8
keyHashes []uint32
}
func (g *bloomFilterGenerator) Add(key []byte) {
// Use double-hashing to generate a sequence of hash values.
// See analysis in [Kirsch,Mitzenmacher 2006].
g.keyHashes = append(g.keyHashes, bloomHash(key))
}
func (g *bloomFilterGenerator) Generate(b Buffer) {
// Compute bloom filter size (in both bits and bytes)
nBits := uint32(len(g.keyHashes) * g.n)
// For small n, we can see a very high false positive rate. Fix it
// by enforcing a minimum bloom filter length.
if nBits < 64 {
nBits = 64
}
nBytes := (nBits + 7) / 8
nBits = nBytes * 8
dest := b.Alloc(int(nBytes) + 1)
dest[nBytes] = g.k
for _, kh := range g.keyHashes {
delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits
for j := uint8(0); j < g.k; j++ {
bitpos := kh % nBits
dest[bitpos/8] |= (1 << (bitpos % 8))
kh += delta
}
}
g.keyHashes = g.keyHashes[:0]
}
// NewBloomFilter creates a new initialized bloom filter for given
// bitsPerKey.
//
// Since bitsPerKey is persisted individually for each bloom filter
// serialization, bloom filters are backwards compatible with respect to
// changing bitsPerKey. This means that no big performance penalty will
// be experienced when changing the parameter. See documentation for
// opt.Options.Filter for more information.
func NewBloomFilter(bitsPerKey int) Filter {
return bloomFilter(bitsPerKey)
}
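The generator above writes the filter bits plus a trailing k byte into a caller-supplied Buffer. A sketch exercising it end to end; byteBuffer is a hypothetical minimal Buffer implementation, not part of the library:

package main

import (
    "fmt"

    "github.com/syndtr/goleveldb/leveldb/filter"
)

// byteBuffer is a hypothetical minimal filter.Buffer backed by a slice.
type byteBuffer struct{ b []byte }

func (w *byteBuffer) Alloc(n int) []byte {
    off := len(w.b)
    w.b = append(w.b, make([]byte, n)...)
    return w.b[off:]
}

func (w *byteBuffer) Write(p []byte) (int, error) {
    w.b = append(w.b, p...)
    return len(p), nil
}

func (w *byteBuffer) WriteByte(c byte) error {
    w.b = append(w.b, c)
    return nil
}

func main() {
    f := filter.NewBloomFilter(10) // ~10 bits per key, so k = 10*69/100 = 6
    g := f.NewGenerator()
    g.Add([]byte("foo"))
    g.Add([]byte("bar"))

    var buf byteBuffer
    g.Generate(&buf)

    fmt.Println(f.Contains(buf.b, []byte("foo"))) // true
    fmt.Println(f.Contains(buf.b, []byte("baz"))) // false with high probability
}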


@ -0,0 +1,60 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package filter provides the interface and an implementation of a probabilistic
// data structure.
//
// The filter is responsible for creating a small filter from a set of keys.
// These filters will then be used to test whether a key is a member of the set.
// In many cases, a filter can cut down the number of disk seeks from a
// handful to a single disk seek per DB.Get call.
package filter
// Buffer is the interface that wraps basic Alloc, Write and WriteByte methods.
type Buffer interface {
// Alloc allocates an n-byte slice from the buffer. This also advances the
// write offset.
Alloc(n int) []byte
// Write appends the contents of p to the buffer.
Write(p []byte) (n int, err error)
// WriteByte appends the byte c to the buffer.
WriteByte(c byte) error
}
// Filter is the filter.
type Filter interface {
// Name returns the name of this policy.
//
// Note that if the filter encoding changes in an incompatible way,
// the name returned by this method must be changed. Otherwise, old
// incompatible filters may be passed to methods of this type.
Name() string
// NewGenerator creates a new filter generator.
NewGenerator() FilterGenerator
// Contains returns true if the filter contains the given key.
//
// The filter argument is a filter generated by the filter generator.
Contains(filter, key []byte) bool
}
// FilterGenerator is the filter generator.
type FilterGenerator interface {
// Add adds a key to the filter generator.
//
// The key may become invalid after the call to this method ends, therefore
// the key must be copied if the implementation requires keeping it for later
// use. The key should not be modified directly; doing so may cause
// undefined results.
Add(key []byte)
// Generate generates the filter based on the keys passed so far. After a call
// to Generate the filter generator may be reset, depending on the implementation.
Generate(b Buffer)
}


@ -0,0 +1,184 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package iterator
import (
"github.com/syndtr/goleveldb/leveldb/util"
)
// BasicArray is the interface that wraps the basic Len and Search methods.
type BasicArray interface {
// Len returns the length of the array.
Len() int
// Search finds the smallest index that points to a key that is greater
// than or equal to the given key.
Search(key []byte) int
}
// Array is the interface that wraps BasicArray and basic Index method.
type Array interface {
BasicArray
// Index returns key/value pair with index of i.
Index(i int) (key, value []byte)
}
// ArrayIndexer is the interface that wraps BasicArray and the basic Get method.
type ArrayIndexer interface {
BasicArray
// Get returns a new data iterator with index of i.
Get(i int) Iterator
}
type basicArrayIterator struct {
util.BasicReleaser
array BasicArray
pos int
err error
}
func (i *basicArrayIterator) Valid() bool {
return i.pos >= 0 && i.pos < i.array.Len() && !i.Released()
}
func (i *basicArrayIterator) First() bool {
if i.Released() {
i.err = ErrIterReleased
return false
}
if i.array.Len() == 0 {
i.pos = -1
return false
}
i.pos = 0
return true
}
func (i *basicArrayIterator) Last() bool {
if i.Released() {
i.err = ErrIterReleased
return false
}
n := i.array.Len()
if n == 0 {
i.pos = 0
return false
}
i.pos = n - 1
return true
}
func (i *basicArrayIterator) Seek(key []byte) bool {
if i.Released() {
i.err = ErrIterReleased
return false
}
n := i.array.Len()
if n == 0 {
i.pos = 0
return false
}
i.pos = i.array.Search(key)
if i.pos >= n {
return false
}
return true
}
func (i *basicArrayIterator) Next() bool {
if i.Released() {
i.err = ErrIterReleased
return false
}
i.pos++
if n := i.array.Len(); i.pos >= n {
i.pos = n
return false
}
return true
}
func (i *basicArrayIterator) Prev() bool {
if i.Released() {
i.err = ErrIterReleased
return false
}
i.pos--
if i.pos < 0 {
i.pos = -1
return false
}
return true
}
func (i *basicArrayIterator) Error() error { return i.err }
type arrayIterator struct {
basicArrayIterator
array Array
pos int
key, value []byte
}
func (i *arrayIterator) updateKV() {
if i.pos == i.basicArrayIterator.pos {
return
}
i.pos = i.basicArrayIterator.pos
if i.Valid() {
i.key, i.value = i.array.Index(i.pos)
} else {
i.key = nil
i.value = nil
}
}
func (i *arrayIterator) Key() []byte {
i.updateKV()
return i.key
}
func (i *arrayIterator) Value() []byte {
i.updateKV()
return i.value
}
type arrayIteratorIndexer struct {
basicArrayIterator
array ArrayIndexer
}
func (i *arrayIteratorIndexer) Get() Iterator {
if i.Valid() {
return i.array.Get(i.basicArrayIterator.pos)
}
return nil
}
// NewArrayIterator returns an iterator from the given array.
func NewArrayIterator(array Array) Iterator {
return &arrayIterator{
basicArrayIterator: basicArrayIterator{array: array, pos: -1},
array: array,
pos: -1,
}
}
// NewArrayIndexer returns an index iterator from the given array.
func NewArrayIndexer(array ArrayIndexer) IteratorIndexer {
return &arrayIteratorIndexer{
basicArrayIterator: basicArrayIterator{array: array, pos: -1},
array: array,
}
}
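A sketch of plugging a plain sorted slice into NewArrayIterator; kvArray is a hypothetical Array implementation whose Search honors the BasicArray contract:

package main

import (
    "bytes"
    "fmt"
    "sort"

    "github.com/syndtr/goleveldb/leveldb/iterator"
)

// kvArray is a hypothetical Array over parallel sorted key/value slices.
type kvArray struct{ keys, values [][]byte }

func (a *kvArray) Len() int { return len(a.keys) }

// Search honors the BasicArray contract: the smallest index whose key
// is greater than or equal to the given key.
func (a *kvArray) Search(key []byte) int {
    return sort.Search(len(a.keys), func(i int) bool {
        return bytes.Compare(a.keys[i], key) >= 0
    })
}

func (a *kvArray) Index(i int) (key, value []byte) { return a.keys[i], a.values[i] }

func main() {
    arr := &kvArray{
        keys:   [][]byte{[]byte("a"), []byte("b"), []byte("c")},
        values: [][]byte{[]byte("1"), []byte("2"), []byte("3")},
    }
    it := iterator.NewArrayIterator(arr)
    defer it.Release()
    for ok := it.Seek([]byte("b")); ok; ok = it.Next() {
        fmt.Printf("%s=%s\n", it.Key(), it.Value()) // b=2, then c=3
    }
}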


@ -0,0 +1,242 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package iterator
import (
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/util"
)
// IteratorIndexer is the interface that wraps CommonIterator and basic Get
// method. IteratorIndexer provides the index for an indexed iterator.
type IteratorIndexer interface {
CommonIterator
// Get returns a new data iterator for the current position, or nil if
// done.
Get() Iterator
}
type indexedIterator struct {
util.BasicReleaser
index IteratorIndexer
strict bool
data Iterator
err error
errf func(err error)
closed bool
}
func (i *indexedIterator) setData() {
if i.data != nil {
i.data.Release()
}
i.data = i.index.Get()
}
func (i *indexedIterator) clearData() {
if i.data != nil {
i.data.Release()
}
i.data = nil
}
func (i *indexedIterator) indexErr() {
if err := i.index.Error(); err != nil {
if i.errf != nil {
i.errf(err)
}
i.err = err
}
}
func (i *indexedIterator) dataErr() bool {
if err := i.data.Error(); err != nil {
if i.errf != nil {
i.errf(err)
}
if i.strict || !errors.IsCorrupted(err) {
i.err = err
return true
}
}
return false
}
func (i *indexedIterator) Valid() bool {
return i.data != nil && i.data.Valid()
}
func (i *indexedIterator) First() bool {
if i.err != nil {
return false
} else if i.Released() {
i.err = ErrIterReleased
return false
}
if !i.index.First() {
i.indexErr()
i.clearData()
return false
}
i.setData()
return i.Next()
}
func (i *indexedIterator) Last() bool {
if i.err != nil {
return false
} else if i.Released() {
i.err = ErrIterReleased
return false
}
if !i.index.Last() {
i.indexErr()
i.clearData()
return false
}
i.setData()
if !i.data.Last() {
if i.dataErr() {
return false
}
i.clearData()
return i.Prev()
}
return true
}
func (i *indexedIterator) Seek(key []byte) bool {
if i.err != nil {
return false
} else if i.Released() {
i.err = ErrIterReleased
return false
}
if !i.index.Seek(key) {
i.indexErr()
i.clearData()
return false
}
i.setData()
if !i.data.Seek(key) {
if i.dataErr() {
return false
}
i.clearData()
return i.Next()
}
return true
}
func (i *indexedIterator) Next() bool {
if i.err != nil {
return false
} else if i.Released() {
i.err = ErrIterReleased
return false
}
switch {
case i.data != nil && !i.data.Next():
if i.dataErr() {
return false
}
i.clearData()
fallthrough
case i.data == nil:
if !i.index.Next() {
i.indexErr()
return false
}
i.setData()
return i.Next()
}
return true
}
func (i *indexedIterator) Prev() bool {
if i.err != nil {
return false
} else if i.Released() {
i.err = ErrIterReleased
return false
}
switch {
case i.data != nil && !i.data.Prev():
if i.dataErr() {
return false
}
i.clearData()
fallthrough
case i.data == nil:
if !i.index.Prev() {
i.indexErr()
return false
}
i.setData()
if !i.data.Last() {
if i.dataErr() {
return false
}
i.clearData()
return i.Prev()
}
}
return true
}
func (i *indexedIterator) Key() []byte {
if i.data == nil {
return nil
}
return i.data.Key()
}
func (i *indexedIterator) Value() []byte {
if i.data == nil {
return nil
}
return i.data.Value()
}
func (i *indexedIterator) Release() {
i.clearData()
i.index.Release()
i.BasicReleaser.Release()
}
func (i *indexedIterator) Error() error {
if i.err != nil {
return i.err
}
if err := i.index.Error(); err != nil {
return err
}
return nil
}
func (i *indexedIterator) SetErrorCallback(f func(err error)) {
i.errf = f
}
// NewIndexedIterator returns an 'indexed iterator'. An 'index iterator' is an
// iterator that returns another iterator, a 'data iterator'. A 'data iterator' is
// the iterator that contains the actual key/value pairs.
//
// If strict is true, any 'corruption errors' (i.e. errors.IsCorrupted(err) == true)
// won't be ignored and will halt the 'indexed iterator'; otherwise the iterator will
// continue to the next 'data iterator'. Corruption in the 'index iterator' will not
// be ignored and will halt the iterator.
func NewIndexedIterator(index IteratorIndexer, strict bool) Iterator {
return &indexedIterator{index: index, strict: strict}
}
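A sketch of the two-level shape NewIndexedIterator expects: the index yields one data iterator per block. blockIndex and sliceArr are hypothetical types; note that real tables index blocks by a separator at or after each block's last key so that Seek works, whereas this sketch only walks forward:

package main

import (
    "bytes"
    "fmt"
    "sort"

    "github.com/syndtr/goleveldb/leveldb/iterator"
)

// sliceArr is a hypothetical Array over a sorted key slice (value == key).
type sliceArr [][]byte

func (a sliceArr) Len() int { return len(a) }
func (a sliceArr) Search(key []byte) int {
    return sort.Search(len(a), func(i int) bool { return bytes.Compare(a[i], key) >= 0 })
}
func (a sliceArr) Index(i int) (key, value []byte) { return a[i], a[i] }

// blockIndex is a hypothetical ArrayIndexer: entry i opens a data
// iterator over block i.
type blockIndex struct {
    firstKeys [][]byte // first key of each block, sorted
    blocks    []sliceArr
}

func (ix *blockIndex) Len() int { return len(ix.firstKeys) }
func (ix *blockIndex) Search(key []byte) int {
    return sort.Search(len(ix.firstKeys), func(i int) bool {
        return bytes.Compare(ix.firstKeys[i], key) >= 0
    })
}
func (ix *blockIndex) Get(i int) iterator.Iterator {
    return iterator.NewArrayIterator(ix.blocks[i])
}

func main() {
    ix := &blockIndex{
        firstKeys: [][]byte{[]byte("a"), []byte("m")},
        blocks: []sliceArr{
            {[]byte("a"), []byte("b")},
            {[]byte("m"), []byte("n")},
        },
    }
    it := iterator.NewIndexedIterator(iterator.NewArrayIndexer(ix), true)
    defer it.Release()
    for it.Next() {
        fmt.Printf("%s ", it.Key()) // a b m n
    }
    fmt.Println()
}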


@ -0,0 +1,132 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package iterator provides interface and implementation to traverse over
// contents of a database.
package iterator
import (
"errors"
"github.com/syndtr/goleveldb/leveldb/util"
)
var (
ErrIterReleased = errors.New("leveldb/iterator: iterator released")
)
// IteratorSeeker is the interface that wraps the 'seeks method'.
type IteratorSeeker interface {
// First moves the iterator to the first key/value pair. If the iterator
// only contains one key/value pair then First and Last would move
// to the same key/value pair.
// It returns whether such a pair exists.
First() bool
// Last moves the iterator to the last key/value pair. If the iterator
// only contains one key/value pair then First and Last would move
// to the same key/value pair.
// It returns whether such a pair exists.
Last() bool
// Seek moves the iterator to the first key/value pair whose key is greater
// than or equal to the given key.
// It returns whether such a pair exists.
//
// It is safe to modify the contents of the argument after Seek returns.
Seek(key []byte) bool
// Next moves the iterator to the next key/value pair.
// It returns false if the iterator is exhausted.
Next() bool
// Prev moves the iterator to the previous key/value pair.
// It returns false if the iterator is exhausted.
Prev() bool
}
// CommonIterator is the interface that wraps common iterator methods.
type CommonIterator interface {
IteratorSeeker
// util.Releaser is the interface that wraps basic Release method.
// When called, Release releases any resources associated with the
// iterator.
util.Releaser
// util.ReleaseSetter is the interface that wraps the basic SetReleaser
// method.
util.ReleaseSetter
// TODO: Remove this when ready.
Valid() bool
// Error returns any accumulated error. Exhausting all the key/value pairs
// is not considered to be an error.
Error() error
}
// Iterator iterates over a DB's key/value pairs in key order.
//
// When it encounters an error, any 'seeks method' will return false and will
// yield no key/value pairs. The error can be queried by calling the Error
// method. Calling Release is still necessary.
//
// An iterator must be released after use, but it is not necessary to read
// an iterator until exhaustion.
// Also, an iterator is not necessarily safe for concurrent use, but it is
// safe to use multiple iterators concurrently, with each in a dedicated
// goroutine.
type Iterator interface {
CommonIterator
// Key returns the key of the current key/value pair, or nil if done.
// The caller should not modify the contents of the returned slice, and
// its contents may change on the next call to any 'seeks method'.
Key() []byte
// Value returns the value of the current key/value pair, or nil if done.
// The caller should not modify the contents of the returned slice, and
// its contents may change on the next call to any 'seeks method'.
Value() []byte
}
// ErrorCallbackSetter is the interface that wraps basic SetErrorCallback
// method.
//
// ErrorCallbackSetter is implemented by the indexed and merged iterators.
type ErrorCallbackSetter interface {
// SetErrorCallback allows setting an error callback for the corresponding
// iterator. Use nil to clear the callback.
SetErrorCallback(f func(err error))
}
type emptyIterator struct {
util.BasicReleaser
err error
}
func (i *emptyIterator) rErr() {
if i.err == nil && i.Released() {
i.err = ErrIterReleased
}
}
func (*emptyIterator) Valid() bool { return false }
func (i *emptyIterator) First() bool { i.rErr(); return false }
func (i *emptyIterator) Last() bool { i.rErr(); return false }
func (i *emptyIterator) Seek(key []byte) bool { i.rErr(); return false }
func (i *emptyIterator) Next() bool { i.rErr(); return false }
func (i *emptyIterator) Prev() bool { i.rErr(); return false }
func (*emptyIterator) Key() []byte { return nil }
func (*emptyIterator) Value() []byte { return nil }
func (i *emptyIterator) Error() error { return i.err }
// NewEmptyIterator creates an empty iterator. The err parameter can be
// nil, but if not nil the given err will be returned by Error method.
func NewEmptyIterator(err error) Iterator {
return &emptyIterator{err: err}
}


@ -0,0 +1,304 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package iterator
import (
"github.com/syndtr/goleveldb/leveldb/comparer"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/util"
)
type dir int
const (
dirReleased dir = iota - 1
dirSOI
dirEOI
dirBackward
dirForward
)
type mergedIterator struct {
cmp comparer.Comparer
iters []Iterator
strict bool
keys [][]byte
index int
dir dir
err error
errf func(err error)
releaser util.Releaser
}
func assertKey(key []byte) []byte {
if key == nil {
panic("leveldb/iterator: nil key")
}
return key
}
func (i *mergedIterator) iterErr(iter Iterator) bool {
if err := iter.Error(); err != nil {
if i.errf != nil {
i.errf(err)
}
if i.strict || !errors.IsCorrupted(err) {
i.err = err
return true
}
}
return false
}
func (i *mergedIterator) Valid() bool {
return i.err == nil && i.dir > dirEOI
}
func (i *mergedIterator) First() bool {
if i.err != nil {
return false
} else if i.dir == dirReleased {
i.err = ErrIterReleased
return false
}
for x, iter := range i.iters {
switch {
case iter.First():
i.keys[x] = assertKey(iter.Key())
case i.iterErr(iter):
return false
default:
i.keys[x] = nil
}
}
i.dir = dirSOI
return i.next()
}
func (i *mergedIterator) Last() bool {
if i.err != nil {
return false
} else if i.dir == dirReleased {
i.err = ErrIterReleased
return false
}
for x, iter := range i.iters {
switch {
case iter.Last():
i.keys[x] = assertKey(iter.Key())
case i.iterErr(iter):
return false
default:
i.keys[x] = nil
}
}
i.dir = dirEOI
return i.prev()
}
func (i *mergedIterator) Seek(key []byte) bool {
if i.err != nil {
return false
} else if i.dir == dirReleased {
i.err = ErrIterReleased
return false
}
for x, iter := range i.iters {
switch {
case iter.Seek(key):
i.keys[x] = assertKey(iter.Key())
case i.iterErr(iter):
return false
default:
i.keys[x] = nil
}
}
i.dir = dirSOI
return i.next()
}
func (i *mergedIterator) next() bool {
var key []byte
if i.dir == dirForward {
key = i.keys[i.index]
}
for x, tkey := range i.keys {
if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) < 0) {
key = tkey
i.index = x
}
}
if key == nil {
i.dir = dirEOI
return false
}
i.dir = dirForward
return true
}
func (i *mergedIterator) Next() bool {
if i.dir == dirEOI || i.err != nil {
return false
} else if i.dir == dirReleased {
i.err = ErrIterReleased
return false
}
switch i.dir {
case dirSOI:
return i.First()
case dirBackward:
key := append([]byte{}, i.keys[i.index]...)
if !i.Seek(key) {
return false
}
return i.Next()
}
x := i.index
iter := i.iters[x]
switch {
case iter.Next():
i.keys[x] = assertKey(iter.Key())
case i.iterErr(iter):
return false
default:
i.keys[x] = nil
}
return i.next()
}
func (i *mergedIterator) prev() bool {
var key []byte
if i.dir == dirBackward {
key = i.keys[i.index]
}
for x, tkey := range i.keys {
if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) > 0) {
key = tkey
i.index = x
}
}
if key == nil {
i.dir = dirSOI
return false
}
i.dir = dirBackward
return true
}
func (i *mergedIterator) Prev() bool {
if i.dir == dirSOI || i.err != nil {
return false
} else if i.dir == dirReleased {
i.err = ErrIterReleased
return false
}
switch i.dir {
case dirEOI:
return i.Last()
case dirForward:
key := append([]byte{}, i.keys[i.index]...)
for x, iter := range i.iters {
if x == i.index {
continue
}
seek := iter.Seek(key)
switch {
case seek && iter.Prev(), !seek && iter.Last():
i.keys[x] = assertKey(iter.Key())
case i.iterErr(iter):
return false
default:
i.keys[x] = nil
}
}
}
x := i.index
iter := i.iters[x]
switch {
case iter.Prev():
i.keys[x] = assertKey(iter.Key())
case i.iterErr(iter):
return false
default:
i.keys[x] = nil
}
return i.prev()
}
func (i *mergedIterator) Key() []byte {
if i.err != nil || i.dir <= dirEOI {
return nil
}
return i.keys[i.index]
}
func (i *mergedIterator) Value() []byte {
if i.err != nil || i.dir <= dirEOI {
return nil
}
return i.iters[i.index].Value()
}
func (i *mergedIterator) Release() {
if i.dir != dirReleased {
i.dir = dirReleased
for _, iter := range i.iters {
iter.Release()
}
i.iters = nil
i.keys = nil
if i.releaser != nil {
i.releaser.Release()
i.releaser = nil
}
}
}
func (i *mergedIterator) SetReleaser(releaser util.Releaser) {
if i.dir == dirReleased {
panic(util.ErrReleased)
}
if i.releaser != nil && releaser != nil {
panic(util.ErrHasReleaser)
}
i.releaser = releaser
}
func (i *mergedIterator) Error() error {
return i.err
}
func (i *mergedIterator) SetErrorCallback(f func(err error)) {
i.errf = f
}
// NewMergedIterator returns an iterator that merges its input. Walking the
// resultant iterator will return all key/value pairs of all input iterators
// in strictly increasing key order, as defined by cmp.
// The input's key ranges may overlap, but there are assumed to be no duplicate
// keys: if iters[i] contains a key k then iters[j] will not contain that key k.
// None of the iters may be nil.
//
// If strict is true, any 'corruption errors' (i.e. errors.IsCorrupted(err) == true)
// won't be ignored and will halt the 'merged iterator'; otherwise the iterator will
// continue to the next 'input iterator'.
func NewMergedIterator(iters []Iterator, cmp comparer.Comparer, strict bool) Iterator {
return &mergedIterator{
iters: iters,
cmp: cmp,
strict: strict,
keys: make([][]byte, len(iters)),
}
}
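A sketch of NewMergedIterator over two sorted inputs using the bytewise comparer.DefaultComparer; sliceArray is a hypothetical Array implementation:

package main

import (
    "bytes"
    "fmt"
    "sort"

    "github.com/syndtr/goleveldb/leveldb/comparer"
    "github.com/syndtr/goleveldb/leveldb/iterator"
)

// sliceArray is a hypothetical Array over a sorted key slice (value == key).
type sliceArray [][]byte

func (a sliceArray) Len() int { return len(a) }
func (a sliceArray) Search(key []byte) int {
    return sort.Search(len(a), func(i int) bool { return bytes.Compare(a[i], key) >= 0 })
}
func (a sliceArray) Index(i int) (key, value []byte) { return a[i], a[i] }

func main() {
    a := iterator.NewArrayIterator(sliceArray{[]byte("a"), []byte("c")})
    b := iterator.NewArrayIterator(sliceArray{[]byte("b"), []byte("d")})

    // The inputs' ranges overlap but hold no duplicate keys, as the
    // contract above requires.
    it := iterator.NewMergedIterator([]iterator.Iterator{a, b}, comparer.DefaultComparer, true)
    defer it.Release()
    for it.Next() {
        fmt.Printf("%s ", it.Key()) // a b c d
    }
    fmt.Println()
}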


@ -0,0 +1,524 @@
// Copyright 2011 The LevelDB-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record.go?r=1d5ccbe03246da926391ee12d1c6caae054ff4b0
// License, authors and contributors information can be found at the URLs below, respectively:
// https://code.google.com/p/leveldb-go/source/browse/LICENSE
// https://code.google.com/p/leveldb-go/source/browse/AUTHORS
// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS
// Package journal reads and writes sequences of journals. Each journal is a stream
// of bytes that completes before the next journal starts.
//
// When reading, call Next to obtain an io.Reader for the next journal. Next will
// return io.EOF when there are no more journals. It is valid to call Next
// without reading the current journal to exhaustion.
//
// When writing, call Next to obtain an io.Writer for the next journal. Calling
// Next finishes the current journal. Call Close to finish the final journal.
//
// Optionally, call Flush to finish the current journal and flush the underlying
// writer without starting a new journal. To start a new journal after flushing,
// call Next.
//
// Neither Readers nor Writers are safe to use concurrently.
//
// Example code:
// func read(r io.Reader) ([]string, error) {
// var ss []string
// journals := journal.NewReader(r, nil, true, true)
// for {
// j, err := journals.Next()
// if err == io.EOF {
// break
// }
// if err != nil {
// return nil, err
// }
// s, err := ioutil.ReadAll(j)
// if err != nil {
// return nil, err
// }
// ss = append(ss, string(s))
// }
// return ss, nil
// }
//
// func write(w io.Writer, ss []string) error {
// journals := journal.NewWriter(w)
// for _, s := range ss {
// j, err := journals.Next()
// if err != nil {
// return err
// }
// if _, err := j.Write([]byte(s)); err != nil {
// return err
// }
// }
// return journals.Close()
// }
//
// The wire format is that the stream is divided into 32 KiB blocks, and each
// block contains a number of tightly packed chunks. Chunks cannot cross block
// boundaries. The last block may be shorter than 32 KiB. Any unused bytes in a
// block must be zero.
//
// A journal maps to one or more chunks. Each chunk has a 7-byte header (a
// 4-byte checksum, a 2-byte little-endian uint16 length, and a 1-byte chunk
// type) followed by a payload. The checksum is over the chunk type and the
// payload (a decoding sketch follows the constants below).
//
// There are four chunk types: whether the chunk is the full journal, or the
// first, middle or last chunk of a multi-chunk journal. A multi-chunk journal
// has one first chunk, zero or more middle chunks, and one last chunk.
//
// The wire format allows for limited recovery in the face of data corruption:
// on a format error (such as a checksum mismatch), the reader moves to the
// next block and looks for the next full or first chunk.
package journal
import (
"encoding/binary"
"fmt"
"io"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/storage"
"github.com/syndtr/goleveldb/leveldb/util"
)
// These constants are part of the wire format and should not be changed.
const (
fullChunkType = 1
firstChunkType = 2
middleChunkType = 3
lastChunkType = 4
)
const (
blockSize = 32 * 1024
headerSize = 7
)
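As a cross-check of the wire format described in the package comment, here is
a minimal decoding sketch, not part of the original source; it assumes buf
holds one complete chunk starting at offset 0:
	checksum := binary.LittleEndian.Uint32(buf[0:4]) // bytes 0-3
	length := binary.LittleEndian.Uint16(buf[4:6])   // bytes 4-5
	chunkType := buf[6]                              // byte 6: full/first/middle/last
	payload := buf[headerSize : headerSize+int(length)]
	// The checksum covers the chunk type and the payload, i.e. buf[6 : 7+length].
	if util.NewCRC(buf[6:headerSize+int(length)]).Value() != checksum {
		// corrupted chunk: the reader would drop it and resynchronize
	}
	_, _ = chunkType, payload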
type flusher interface {
Flush() error
}
// ErrCorrupted is the error type generated by a corrupted block or chunk.
type ErrCorrupted struct {
Size int
Reason string
}
func (e *ErrCorrupted) Error() string {
return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size)
}
// Dropper is the interface that wraps the basic Drop method. The Drop
// method is called when the journal reader drops a block or chunk.
type Dropper interface {
Drop(err error)
}
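For illustration, a minimal Dropper that logs whatever the reader skips;
logDropper is a hypothetical name, and the sketch assumes the standard "log"
package is imported:
	type logDropper struct{}
	func (logDropper) Drop(err error) {
		log.Printf("journal: dropped corrupted data: %v", err)
	}
A value of this type can then be passed as the dropper argument to NewReader
below.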
// Reader reads journals from an underlying io.Reader.
type Reader struct {
// r is the underlying reader.
r io.Reader
// the dropper.
dropper Dropper
// strict flag.
strict bool
// checksum flag.
checksum bool
// seq is the sequence number of the current journal.
seq int
// buf[i:j] is the unread portion of the current chunk's payload.
// The low bound, i, excludes the chunk header.
i, j int
// n is the number of bytes of buf that are valid. Once reading has started,
// only the final block can have n < blockSize.
n int
// last is whether the current chunk is the last chunk of the journal.
last bool
// err is any accumulated error.
err error
// buf is the buffer.
buf [blockSize]byte
}
// NewReader returns a new reader. The dropper may be nil. If strict is true,
// a corrupted or invalid chunk will halt the journal reader entirely.
func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader {
return &Reader{
r: r,
dropper: dropper,
strict: strict,
checksum: checksum,
last: true,
}
}
var errSkip = errors.New("leveldb/journal: skipped")
func (r *Reader) corrupt(n int, reason string, skip bool) error {
if r.dropper != nil {
r.dropper.Drop(&ErrCorrupted{n, reason})
}
if r.strict && !skip {
r.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrCorrupted{n, reason})
return r.err
}
return errSkip
}
// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the
// next block into the buffer if necessary.
func (r *Reader) nextChunk(first bool) error {
for {
if r.j+headerSize <= r.n {
checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4])
length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6])
chunkType := r.buf[r.j+6]
unprocBlock := r.n - r.j
if checksum == 0 && length == 0 && chunkType == 0 {
// Drop entire block.
r.i = r.n
r.j = r.n
return r.corrupt(unprocBlock, "zero header", false)
}
if chunkType < fullChunkType || chunkType > lastChunkType {
// Drop entire block.
r.i = r.n
r.j = r.n
return r.corrupt(unprocBlock, fmt.Sprintf("invalid chunk type %#x", chunkType), false)
}
r.i = r.j + headerSize
r.j = r.j + headerSize + int(length)
if r.j > r.n {
// Drop entire block.
r.i = r.n
r.j = r.n
return r.corrupt(unprocBlock, "chunk length overflows block", false)
} else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() {
// Drop entire block.
r.i = r.n
r.j = r.n
return r.corrupt(unprocBlock, "checksum mismatch", false)
}
if first && chunkType != fullChunkType && chunkType != firstChunkType {
chunkLength := (r.j - r.i) + headerSize
r.i = r.j
// Report the error, but skip it.
return r.corrupt(chunkLength, "orphan chunk", true)
}
r.last = chunkType == fullChunkType || chunkType == lastChunkType
return nil
}
// The last block.
if r.n < blockSize && r.n > 0 {
if !first {
return r.corrupt(0, "missing chunk part", false)
}
r.err = io.EOF
return r.err
}
// Read block.
n, err := io.ReadFull(r.r, r.buf[:])
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
return err
}
if n == 0 {
if !first {
return r.corrupt(0, "missing chunk part", false)
}
r.err = io.EOF
return r.err
}
r.i, r.j, r.n = 0, 0, n
}
}
// Next returns a reader for the next journal. It returns io.EOF if there are
// no more journals. The reader returned becomes stale after the next Next
// call and should no longer be used. If strict is false, the reader returns
// an io.ErrUnexpectedEOF error when it encounters a corrupted journal.
func (r *Reader) Next() (io.Reader, error) {
r.seq++
if r.err != nil {
return nil, r.err
}
r.i = r.j
for {
if err := r.nextChunk(true); err == nil {
break
} else if err != errSkip {
return nil, err
}
}
return &singleReader{r, r.seq, nil}, nil
}
// Reset resets the journal reader, allowing reuse of the journal reader.
// Reset returns the last accumulated error.
func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error {
r.seq++
err := r.err
r.r = reader
r.dropper = dropper
r.strict = strict
r.checksum = checksum
r.i = 0
r.j = 0
r.n = 0
r.last = true
r.err = nil
return err
}
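A minimal reuse sketch for Reset, where r is an existing *Reader and nextFile
is a hypothetical io.Reader for the next journal file:
	if err := r.Reset(nextFile, nil, true, true); err != nil {
		// err is the error accumulated before the reset,
		// not a failure of Reset itself.
		log.Printf("previous journal ended with: %v", err)
	}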
type singleReader struct {
r *Reader
seq int
err error
}
func (x *singleReader) Read(p []byte) (int, error) {
r := x.r
if r.seq != x.seq {
return 0, errors.New("leveldb/journal: stale reader")
}
if x.err != nil {
return 0, x.err
}
if r.err != nil {
return 0, r.err
}
for r.i == r.j {
if r.last {
return 0, io.EOF
}
x.err = r.nextChunk(false)
if x.err != nil {
if x.err == errSkip {
x.err = io.ErrUnexpectedEOF
}
return 0, x.err
}
}
n := copy(p, r.buf[r.i:r.j])
r.i += n
return n, nil
}
func (x *singleReader) ReadByte() (byte, error) {
r := x.r
if r.seq != x.seq {
return 0, errors.New("leveldb/journal: stale reader")
}
if x.err != nil {
return 0, x.err
}
if r.err != nil {
return 0, r.err
}
for r.i == r.j {
if r.last {
return 0, io.EOF
}
x.err = r.nextChunk(false)
if x.err != nil {
if x.err == errSkip {
x.err = io.ErrUnexpectedEOF
}
return 0, x.err
}
}
c := r.buf[r.i]
r.i++
return c, nil
}
// Writer writes journals to an underlying io.Writer.
type Writer struct {
// w is the underlying writer.
w io.Writer
// seq is the sequence number of the current journal.
seq int
// f is w as a flusher.
f flusher
// buf[i:j] is the bytes that will become the current chunk.
// The low bound, i, includes the chunk header.
i, j int
// buf[:written] has already been written to w.
// written is zero unless Flush has been called.
written int
// first is whether the current chunk is the first chunk of the journal.
first bool
// pending is whether a chunk is buffered but not yet written.
pending bool
// err is any accumulated error.
err error
// buf is the buffer.
buf [blockSize]byte
}
// NewWriter returns a new Writer.
func NewWriter(w io.Writer) *Writer {
f, _ := w.(flusher)
return &Writer{
w: w,
f: f,
}
}
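Because NewWriter only type-asserts the destination for a Flush() error
method, wrapping it in a bufio.Writer lets Writer.Flush push buffered bytes
through both layers. A minimal sketch, where f is a hypothetical already-open
*os.File:
	bw := bufio.NewWriter(f)
	jw := NewWriter(bw) // *bufio.Writer implements Flush() error, so jw.f is set
	// ... write journals via jw.Next() ...
	if err := jw.Flush(); err != nil { // finishes the journal and flushes bw
		// handle the write error
	}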
// fillHeader fills in the header for the pending chunk.
func (w *Writer) fillHeader(last bool) {
if w.i+headerSize > w.j || w.j > blockSize {
panic("leveldb/journal: bad writer state")
}
if last {
if w.first {
w.buf[w.i+6] = fullChunkType
} else {
w.buf[w.i+6] = lastChunkType
}
} else {
if w.first {
w.buf[w.i+6] = firstChunkType
} else {
w.buf[w.i+6] = middleChunkType
}
}
binary.LittleEndian.PutUint32(w.buf[w.i+0:w.i+4], util.NewCRC(w.buf[w.i+6:w.j]).Value())
binary.LittleEndian.PutUint16(w.buf[w.i+4:w.i+6], uint16(w.j-w.i-headerSize))
}
// writeBlock writes the buffered block to the underlying writer, and reserves
// space for the next chunk's header.
func (w *Writer) writeBlock() {
_, w.err = w.w.Write(w.buf[w.written:])
w.i = 0
w.j = headerSize
w.written = 0
}
// writePending finishes the current journal and writes the buffer to the
// underlying writer.
func (w *Writer) writePending() {
if w.err != nil {
return
}
if w.pending {
w.fillHeader(true)
w.pending = false
}
_, w.err = w.w.Write(w.buf[w.written:w.j])
w.written = w.j
}
// Close finishes the current journal and closes the writer.
func (w *Writer) Close() error {
w.seq++
w.writePending()
if w.err != nil {
return w.err
}
w.err = errors.New("leveldb/journal: closed Writer")
return nil
}
// Flush finishes the current journal, writes to the underlying writer, and
// flushes it if that writer implements interface{ Flush() error }.
func (w *Writer) Flush() error {
w.seq++
w.writePending()
if w.err != nil {
return w.err
}
if w.f != nil {
w.err = w.f.Flush()
return w.err
}
return nil
}
// Reset resets the journal writer, allowing reuse of the journal writer.
// Reset also closes the journal writer if it is not closed already.
func (w *Writer) Reset(writer io.Writer) (err error) {
w.seq++
if w.err == nil {
w.writePending()
err = w.err
}
w.w = writer
w.f, _ = writer.(flusher)
w.i = 0
w.j = 0
w.written = 0
w.first = false
w.pending = false
w.err = nil
return
}
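Mirroring Reader.Reset, a minimal reuse sketch, where newFile is a
hypothetical io.Writer for the next destination:
	if err := w.Reset(newFile); err != nil {
		// err reports a failure while finishing the previous journal.
		log.Printf("finishing previous journal failed: %v", err)
	}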
// Next returns a writer for the next journal. The writer returned becomes stale
// after the next Close, Flush or Next call, and should no longer be used.
func (w *Writer) Next() (io.Writer, error) {
w.seq++
if w.err != nil {
return nil, w.err
}
if w.pending {
w.fillHeader(true)
}
w.i = w.j
w.j = w.j + headerSize
// Check if there is room in the block for the header.
if w.j > blockSize {
// Fill in the rest of the block with zeroes.
for k := w.i; k < blockSize; k++ {
w.buf[k] = 0
}
w.writeBlock()
if w.err != nil {
return nil, w.err
}
}
w.first = true
w.pending = true
return singleWriter{w, w.seq}, nil
}
type singleWriter struct {
w *Writer
seq int
}
func (x singleWriter) Write(p []byte) (int, error) {
w := x.w
if w.seq != x.seq {
return 0, errors.New("leveldb/journal: stale writer")
}
if w.err != nil {
return 0, w.err
}
n0 := len(p)
for len(p) > 0 {
// Write a block, if it is full.
if w.j == blockSize {
w.fillHeader(false)
w.writeBlock()
if w.err != nil {
return 0, w.err
}
w.first = false
}
// Copy bytes into the buffer.
n := copy(w.buf[w.j:], p)
w.j += n
p = p[n:]
}
return n0, nil
}
