From 31ee241de4ca93fca38f082f410f8e433e1ffabd Mon Sep 17 00:00:00 2001 From: Paul Lecuq Date: Sat, 12 Sep 2020 17:52:06 +0200 Subject: [PATCH] converted coronafana to xorm ORM --- cmd/coronafana/coronafana.go | 33 +- go.mod | 5 +- go.sum | 37 + src/config/main.go | 59 +- src/coronafana/main.go | 166 +- vendor/github.com/golang/snappy/.gitignore | 16 + vendor/github.com/golang/snappy/AUTHORS | 15 + vendor/github.com/golang/snappy/CONTRIBUTORS | 37 + vendor/github.com/golang/snappy/LICENSE | 27 + vendor/github.com/golang/snappy/README | 107 ++ vendor/github.com/golang/snappy/decode.go | 237 +++ .../github.com/golang/snappy/decode_amd64.go | 14 + .../github.com/golang/snappy/decode_amd64.s | 490 ++++++ .../github.com/golang/snappy/decode_other.go | 101 ++ vendor/github.com/golang/snappy/encode.go | 285 ++++ .../github.com/golang/snappy/encode_amd64.go | 29 + .../github.com/golang/snappy/encode_amd64.s | 730 +++++++++ .../github.com/golang/snappy/encode_other.go | 238 +++ vendor/github.com/golang/snappy/snappy.go | 98 ++ vendor/github.com/jinzhu/gorm/.gitignore | 3 - vendor/github.com/jinzhu/gorm/License | 21 - vendor/github.com/jinzhu/gorm/README.md | 41 - vendor/github.com/jinzhu/gorm/association.go | 377 ----- vendor/github.com/jinzhu/gorm/callback.go | 250 --- .../github.com/jinzhu/gorm/callback_create.go | 197 --- .../github.com/jinzhu/gorm/callback_delete.go | 63 - .../github.com/jinzhu/gorm/callback_query.go | 109 -- .../jinzhu/gorm/callback_query_preload.go | 410 ----- .../jinzhu/gorm/callback_row_query.go | 41 - .../github.com/jinzhu/gorm/callback_save.go | 170 -- .../github.com/jinzhu/gorm/callback_update.go | 121 -- vendor/github.com/jinzhu/gorm/dialect.go | 147 -- .../github.com/jinzhu/gorm/dialect_common.go | 196 --- .../github.com/jinzhu/gorm/dialect_mysql.go | 246 --- .../jinzhu/gorm/dialect_postgres.go | 147 -- .../github.com/jinzhu/gorm/dialect_sqlite3.go | 107 -- .../github.com/jinzhu/gorm/docker-compose.yml | 30 - vendor/github.com/jinzhu/gorm/errors.go | 72 - vendor/github.com/jinzhu/gorm/field.go | 66 - vendor/github.com/jinzhu/gorm/go.mod | 15 - vendor/github.com/jinzhu/gorm/go.sum | 29 - vendor/github.com/jinzhu/gorm/interface.go | 24 - .../jinzhu/gorm/join_table_handler.go | 211 --- vendor/github.com/jinzhu/gorm/logger.go | 141 -- vendor/github.com/jinzhu/gorm/main.go | 881 ---------- vendor/github.com/jinzhu/gorm/model.go | 14 - vendor/github.com/jinzhu/gorm/model_struct.go | 671 -------- vendor/github.com/jinzhu/gorm/naming.go | 124 -- vendor/github.com/jinzhu/gorm/scope.go | 1421 ----------------- vendor/github.com/jinzhu/gorm/search.go | 153 -- vendor/github.com/jinzhu/gorm/test_all.sh | 5 - vendor/github.com/jinzhu/gorm/utils.go | 226 --- vendor/github.com/jinzhu/gorm/wercker.yml | 154 -- vendor/github.com/jinzhu/inflection/LICENSE | 21 - vendor/github.com/jinzhu/inflection/README.md | 55 - vendor/github.com/jinzhu/inflection/go.mod | 1 - .../jinzhu/inflection/inflections.go | 273 ---- .../github.com/jinzhu/inflection/wercker.yml | 23 - vendor/github.com/jmoiron/sqlx/.travis.yml | 27 - vendor/github.com/jmoiron/sqlx/LICENSE | 23 - vendor/github.com/jmoiron/sqlx/README.md | 187 --- vendor/github.com/jmoiron/sqlx/bind.go | 217 --- vendor/github.com/jmoiron/sqlx/doc.go | 12 - vendor/github.com/jmoiron/sqlx/go.mod | 7 - vendor/github.com/jmoiron/sqlx/go.sum | 6 - vendor/github.com/jmoiron/sqlx/named.go | 356 ----- .../github.com/jmoiron/sqlx/named_context.go | 132 -- .../jmoiron/sqlx/reflectx/README.md | 17 - .../jmoiron/sqlx/reflectx/reflect.go | 441 ----- 
vendor/github.com/jmoiron/sqlx/sqlx.go | 1045 ------------ .../github.com/jmoiron/sqlx/sqlx_context.go | 346 ---- vendor/github.com/syndtr/goleveldb/LICENSE | 24 + .../syndtr/goleveldb/leveldb/batch.go | 349 ++++ .../syndtr/goleveldb/leveldb/cache/cache.go | 704 ++++++++ .../syndtr/goleveldb/leveldb/cache/lru.go | 195 +++ .../syndtr/goleveldb/leveldb/comparer.go | 67 + .../leveldb/comparer/bytes_comparer.go | 51 + .../goleveldb/leveldb/comparer/comparer.go | 57 + .../github.com/syndtr/goleveldb/leveldb/db.go | 1179 ++++++++++++++ .../syndtr/goleveldb/leveldb/db_compaction.go | 854 ++++++++++ .../syndtr/goleveldb/leveldb/db_iter.go | 360 +++++ .../syndtr/goleveldb/leveldb/db_snapshot.go | 187 +++ .../syndtr/goleveldb/leveldb/db_state.go | 239 +++ .../goleveldb/leveldb/db_transaction.go | 329 ++++ .../syndtr/goleveldb/leveldb/db_util.go | 102 ++ .../syndtr/goleveldb/leveldb/db_write.go | 464 ++++++ .../syndtr/goleveldb/leveldb/doc.go | 92 ++ .../syndtr/goleveldb/leveldb/errors.go | 20 + .../syndtr/goleveldb/leveldb/errors/errors.go | 78 + .../syndtr/goleveldb/leveldb/filter.go | 31 + .../syndtr/goleveldb/leveldb/filter/bloom.go | 116 ++ .../syndtr/goleveldb/leveldb/filter/filter.go | 60 + .../goleveldb/leveldb/iterator/array_iter.go | 184 +++ .../leveldb/iterator/indexed_iter.go | 242 +++ .../syndtr/goleveldb/leveldb/iterator/iter.go | 132 ++ .../goleveldb/leveldb/iterator/merged_iter.go | 304 ++++ .../goleveldb/leveldb/journal/journal.go | 524 ++++++ .../syndtr/goleveldb/leveldb/key.go | 143 ++ .../syndtr/goleveldb/leveldb/memdb/memdb.go | 479 ++++++ .../syndtr/goleveldb/leveldb/opt/options.go | 697 ++++++++ .../syndtr/goleveldb/leveldb/options.go | 107 ++ .../syndtr/goleveldb/leveldb/session.go | 210 +++ .../goleveldb/leveldb/session_compaction.go | 302 ++++ .../goleveldb/leveldb/session_record.go | 323 ++++ .../syndtr/goleveldb/leveldb/session_util.go | 271 ++++ .../syndtr/goleveldb/leveldb/storage.go | 63 + .../goleveldb/leveldb/storage/file_storage.go | 671 ++++++++ .../leveldb/storage/file_storage_nacl.go | 34 + .../leveldb/storage/file_storage_plan9.go | 63 + .../leveldb/storage/file_storage_solaris.go | 81 + .../leveldb/storage/file_storage_unix.go | 98 ++ .../leveldb/storage/file_storage_windows.go | 78 + .../goleveldb/leveldb/storage/mem_storage.go | 222 +++ .../goleveldb/leveldb/storage/storage.go | 187 +++ .../syndtr/goleveldb/leveldb/table.go | 531 ++++++ .../syndtr/goleveldb/leveldb/table/reader.go | 1139 +++++++++++++ .../syndtr/goleveldb/leveldb/table/table.go | 177 ++ .../syndtr/goleveldb/leveldb/table/writer.go | 375 +++++ .../syndtr/goleveldb/leveldb/util.go | 98 ++ .../syndtr/goleveldb/leveldb/util/buffer.go | 293 ++++ .../goleveldb/leveldb/util/buffer_pool.go | 239 +++ .../syndtr/goleveldb/leveldb/util/crc32.go | 30 + .../syndtr/goleveldb/leveldb/util/hash.go | 48 + .../syndtr/goleveldb/leveldb/util/range.go | 32 + .../syndtr/goleveldb/leveldb/util/util.go | 73 + .../syndtr/goleveldb/leveldb/version.go | 528 ++++++ vendor/modules.txt | 37 +- vendor/xorm.io/builder/.drone.yml | 21 + vendor/xorm.io/builder/.gitignore | 1 + vendor/xorm.io/builder/LICENSE | 27 + vendor/xorm.io/builder/README.md | 206 +++ vendor/xorm.io/builder/builder.go | 321 ++++ vendor/xorm.io/builder/builder_delete.go | 27 + vendor/xorm.io/builder/builder_insert.go | 149 ++ vendor/xorm.io/builder/builder_join.go | 42 + vendor/xorm.io/builder/builder_limit.go | 103 ++ vendor/xorm.io/builder/builder_select.go | 158 ++ .../xorm.io/builder/builder_set_operations.go | 51 + 
vendor/xorm.io/builder/builder_update.go | 57 + vendor/xorm.io/builder/cond.go | 38 + vendor/xorm.io/builder/cond_and.go | 61 + vendor/xorm.io/builder/cond_between.go | 65 + vendor/xorm.io/builder/cond_compare.go | 160 ++ vendor/xorm.io/builder/cond_eq.go | 117 ++ vendor/xorm.io/builder/cond_expr.go | 43 + vendor/xorm.io/builder/cond_if.go | 49 + vendor/xorm.io/builder/cond_in.go | 237 +++ vendor/xorm.io/builder/cond_like.go | 41 + vendor/xorm.io/builder/cond_neq.go | 94 ++ vendor/xorm.io/builder/cond_not.go | 77 + vendor/xorm.io/builder/cond_notin.go | 234 +++ vendor/xorm.io/builder/cond_null.go | 59 + vendor/xorm.io/builder/cond_or.go | 69 + vendor/xorm.io/builder/doc.go | 120 ++ vendor/xorm.io/builder/error.go | 40 + vendor/xorm.io/builder/go.mod | 8 + vendor/xorm.io/builder/go.sum | 9 + vendor/xorm.io/builder/sql.go | 165 ++ vendor/xorm.io/builder/writer.go | 42 + vendor/xorm.io/xorm/.changelog.yml | 53 + vendor/xorm.io/xorm/.drone.yml | 337 ++++ .../jmoiron/sqlx => xorm.io/xorm}/.gitignore | 18 +- vendor/xorm.io/xorm/.revive.toml | 25 + vendor/xorm.io/xorm/CHANGELOG.md | 205 +++ vendor/xorm.io/xorm/CONTRIBUTING.md | 87 + vendor/xorm.io/xorm/LICENSE | 27 + vendor/xorm.io/xorm/Makefile | 224 +++ vendor/xorm.io/xorm/README.md | 482 ++++++ vendor/xorm.io/xorm/README_CN.md | 474 ++++++ vendor/xorm.io/xorm/caches/cache.go | 99 ++ vendor/xorm.io/xorm/caches/encode.go | 58 + vendor/xorm.io/xorm/caches/leveldb.go | 94 ++ vendor/xorm.io/xorm/caches/lru.go | 282 ++++ vendor/xorm.io/xorm/caches/manager.go | 56 + vendor/xorm.io/xorm/caches/memory_store.go | 49 + vendor/xorm.io/xorm/contexts/context_cache.go | 30 + vendor/xorm.io/xorm/contexts/hook.go | 75 + vendor/xorm.io/xorm/convert.go | 422 +++++ vendor/xorm.io/xorm/convert/conversion.go | 12 + vendor/xorm.io/xorm/core/db.go | 293 ++++ vendor/xorm.io/xorm/core/error.go | 14 + vendor/xorm.io/xorm/core/interface.go | 22 + vendor/xorm.io/xorm/core/rows.go | 338 ++++ vendor/xorm.io/xorm/core/scan.go | 66 + vendor/xorm.io/xorm/core/stmt.go | 194 +++ vendor/xorm.io/xorm/core/tx.go | 219 +++ vendor/xorm.io/xorm/dialects/dialect.go | 284 ++++ vendor/xorm.io/xorm/dialects/driver.go | 57 + vendor/xorm.io/xorm/dialects/filter.go | 43 + vendor/xorm.io/xorm/dialects/gen_reserved.sh | 6 + vendor/xorm.io/xorm/dialects/mssql.go | 596 +++++++ vendor/xorm.io/xorm/dialects/mysql.go | 671 ++++++++ vendor/xorm.io/xorm/dialects/oracle.go | 854 ++++++++++ vendor/xorm.io/xorm/dialects/pg_reserved.txt | 746 +++++++++ vendor/xorm.io/xorm/dialects/postgres.go | 1349 ++++++++++++++++ vendor/xorm.io/xorm/dialects/quote.go | 15 + vendor/xorm.io/xorm/dialects/sqlite3.go | 529 ++++++ vendor/xorm.io/xorm/dialects/table_name.go | 89 ++ vendor/xorm.io/xorm/dialects/time.go | 49 + vendor/xorm.io/xorm/doc.go | 184 +++ vendor/xorm.io/xorm/engine.go | 1303 +++++++++++++++ vendor/xorm.io/xorm/engine_group.go | 230 +++ vendor/xorm.io/xorm/engine_group_policy.go | 118 ++ vendor/xorm.io/xorm/error.go | 26 + vendor/xorm.io/xorm/go.mod | 14 + vendor/xorm.io/xorm/go.sum | 70 + vendor/xorm.io/xorm/interface.go | 130 ++ vendor/xorm.io/xorm/internal/json/json.go | 31 + .../xorm.io/xorm/internal/statements/cache.go | 79 + .../xorm/internal/statements/column_map.go | 66 + .../xorm/internal/statements/expr_param.go | 126 ++ .../xorm/internal/statements/insert.go | 207 +++ vendor/xorm.io/xorm/internal/statements/pk.go | 98 ++ .../xorm.io/xorm/internal/statements/query.go | 441 +++++ .../xorm/internal/statements/statement.go | 995 ++++++++++++ .../internal/statements/statement_args.go | 132 ++ 
.../xorm/internal/statements/update.go | 294 ++++ .../xorm/internal/statements/values.go | 154 ++ vendor/xorm.io/xorm/internal/utils/name.go | 13 + vendor/xorm.io/xorm/internal/utils/reflect.go | 13 + vendor/xorm.io/xorm/internal/utils/slice.go | 22 + vendor/xorm.io/xorm/internal/utils/sql.go | 19 + vendor/xorm.io/xorm/internal/utils/strings.go | 29 + vendor/xorm.io/xorm/internal/utils/zero.go | 145 ++ vendor/xorm.io/xorm/log/logger.go | 217 +++ vendor/xorm.io/xorm/log/logger_context.go | 116 ++ vendor/xorm.io/xorm/log/syslogger.go | 87 + vendor/xorm.io/xorm/names/mapper.go | 265 +++ vendor/xorm.io/xorm/names/table_name.go | 56 + vendor/xorm.io/xorm/processors.go | 144 ++ vendor/xorm.io/xorm/rows.go | 158 ++ vendor/xorm.io/xorm/schemas/column.go | 133 ++ vendor/xorm.io/xorm/schemas/index.go | 72 + vendor/xorm.io/xorm/schemas/pk.go | 41 + vendor/xorm.io/xorm/schemas/quote.go | 240 +++ vendor/xorm.io/xorm/schemas/table.go | 195 +++ vendor/xorm.io/xorm/schemas/type.go | 336 ++++ vendor/xorm.io/xorm/session.go | 912 +++++++++++ vendor/xorm.io/xorm/session_cols.go | 143 ++ vendor/xorm.io/xorm/session_cond.go | 55 + vendor/xorm.io/xorm/session_convert.go | 529 ++++++ vendor/xorm.io/xorm/session_delete.go | 251 +++ vendor/xorm.io/xorm/session_exist.go | 29 + vendor/xorm.io/xorm/session_find.go | 490 ++++++ vendor/xorm.io/xorm/session_get.go | 358 +++++ vendor/xorm.io/xorm/session_insert.go | 643 ++++++++ vendor/xorm.io/xorm/session_iterate.go | 105 ++ vendor/xorm.io/xorm/session_query.go | 257 +++ vendor/xorm.io/xorm/session_raw.go | 180 +++ vendor/xorm.io/xorm/session_schema.go | 490 ++++++ vendor/xorm.io/xorm/session_stats.go | 81 + vendor/xorm.io/xorm/session_tx.go | 86 + vendor/xorm.io/xorm/session_update.go | 532 ++++++ vendor/xorm.io/xorm/tags/parser.go | 308 ++++ vendor/xorm.io/xorm/tags/tag.go | 332 ++++ 255 files changed, 42340 insertions(+), 10243 deletions(-) create mode 100644 vendor/github.com/golang/snappy/.gitignore create mode 100644 vendor/github.com/golang/snappy/AUTHORS create mode 100644 vendor/github.com/golang/snappy/CONTRIBUTORS create mode 100644 vendor/github.com/golang/snappy/LICENSE create mode 100644 vendor/github.com/golang/snappy/README create mode 100644 vendor/github.com/golang/snappy/decode.go create mode 100644 vendor/github.com/golang/snappy/decode_amd64.go create mode 100644 vendor/github.com/golang/snappy/decode_amd64.s create mode 100644 vendor/github.com/golang/snappy/decode_other.go create mode 100644 vendor/github.com/golang/snappy/encode.go create mode 100644 vendor/github.com/golang/snappy/encode_amd64.go create mode 100644 vendor/github.com/golang/snappy/encode_amd64.s create mode 100644 vendor/github.com/golang/snappy/encode_other.go create mode 100644 vendor/github.com/golang/snappy/snappy.go delete mode 100644 vendor/github.com/jinzhu/gorm/.gitignore delete mode 100644 vendor/github.com/jinzhu/gorm/License delete mode 100644 vendor/github.com/jinzhu/gorm/README.md delete mode 100644 vendor/github.com/jinzhu/gorm/association.go delete mode 100644 vendor/github.com/jinzhu/gorm/callback.go delete mode 100644 vendor/github.com/jinzhu/gorm/callback_create.go delete mode 100644 vendor/github.com/jinzhu/gorm/callback_delete.go delete mode 100644 vendor/github.com/jinzhu/gorm/callback_query.go delete mode 100644 vendor/github.com/jinzhu/gorm/callback_query_preload.go delete mode 100644 vendor/github.com/jinzhu/gorm/callback_row_query.go delete mode 100644 vendor/github.com/jinzhu/gorm/callback_save.go delete mode 100644 
vendor/github.com/jinzhu/gorm/callback_update.go delete mode 100644 vendor/github.com/jinzhu/gorm/dialect.go delete mode 100644 vendor/github.com/jinzhu/gorm/dialect_common.go delete mode 100644 vendor/github.com/jinzhu/gorm/dialect_mysql.go delete mode 100644 vendor/github.com/jinzhu/gorm/dialect_postgres.go delete mode 100644 vendor/github.com/jinzhu/gorm/dialect_sqlite3.go delete mode 100644 vendor/github.com/jinzhu/gorm/docker-compose.yml delete mode 100644 vendor/github.com/jinzhu/gorm/errors.go delete mode 100644 vendor/github.com/jinzhu/gorm/field.go delete mode 100644 vendor/github.com/jinzhu/gorm/go.mod delete mode 100644 vendor/github.com/jinzhu/gorm/go.sum delete mode 100644 vendor/github.com/jinzhu/gorm/interface.go delete mode 100644 vendor/github.com/jinzhu/gorm/join_table_handler.go delete mode 100644 vendor/github.com/jinzhu/gorm/logger.go delete mode 100644 vendor/github.com/jinzhu/gorm/main.go delete mode 100644 vendor/github.com/jinzhu/gorm/model.go delete mode 100644 vendor/github.com/jinzhu/gorm/model_struct.go delete mode 100644 vendor/github.com/jinzhu/gorm/naming.go delete mode 100644 vendor/github.com/jinzhu/gorm/scope.go delete mode 100644 vendor/github.com/jinzhu/gorm/search.go delete mode 100644 vendor/github.com/jinzhu/gorm/test_all.sh delete mode 100644 vendor/github.com/jinzhu/gorm/utils.go delete mode 100644 vendor/github.com/jinzhu/gorm/wercker.yml delete mode 100644 vendor/github.com/jinzhu/inflection/LICENSE delete mode 100644 vendor/github.com/jinzhu/inflection/README.md delete mode 100644 vendor/github.com/jinzhu/inflection/go.mod delete mode 100644 vendor/github.com/jinzhu/inflection/inflections.go delete mode 100644 vendor/github.com/jinzhu/inflection/wercker.yml delete mode 100644 vendor/github.com/jmoiron/sqlx/.travis.yml delete mode 100644 vendor/github.com/jmoiron/sqlx/LICENSE delete mode 100644 vendor/github.com/jmoiron/sqlx/README.md delete mode 100644 vendor/github.com/jmoiron/sqlx/bind.go delete mode 100644 vendor/github.com/jmoiron/sqlx/doc.go delete mode 100644 vendor/github.com/jmoiron/sqlx/go.mod delete mode 100644 vendor/github.com/jmoiron/sqlx/go.sum delete mode 100644 vendor/github.com/jmoiron/sqlx/named.go delete mode 100644 vendor/github.com/jmoiron/sqlx/named_context.go delete mode 100644 vendor/github.com/jmoiron/sqlx/reflectx/README.md delete mode 100644 vendor/github.com/jmoiron/sqlx/reflectx/reflect.go delete mode 100644 vendor/github.com/jmoiron/sqlx/sqlx.go delete mode 100644 vendor/github.com/jmoiron/sqlx/sqlx_context.go create mode 100644 vendor/github.com/syndtr/goleveldb/LICENSE create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/batch.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/comparer.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db_state.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go create mode 100644 
vendor/github.com/syndtr/goleveldb/leveldb/db_util.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db_write.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/doc.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/errors.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/filter.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/key.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/options.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/session.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/session_record.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/session_util.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/table.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/table/table.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util/range.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util/util.go create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/version.go create mode 100644 vendor/xorm.io/builder/.drone.yml create mode 100644 vendor/xorm.io/builder/.gitignore create mode 100644 vendor/xorm.io/builder/LICENSE create mode 100644 vendor/xorm.io/builder/README.md create mode 100644 
vendor/xorm.io/builder/builder.go create mode 100644 vendor/xorm.io/builder/builder_delete.go create mode 100644 vendor/xorm.io/builder/builder_insert.go create mode 100644 vendor/xorm.io/builder/builder_join.go create mode 100644 vendor/xorm.io/builder/builder_limit.go create mode 100644 vendor/xorm.io/builder/builder_select.go create mode 100644 vendor/xorm.io/builder/builder_set_operations.go create mode 100644 vendor/xorm.io/builder/builder_update.go create mode 100644 vendor/xorm.io/builder/cond.go create mode 100644 vendor/xorm.io/builder/cond_and.go create mode 100644 vendor/xorm.io/builder/cond_between.go create mode 100644 vendor/xorm.io/builder/cond_compare.go create mode 100644 vendor/xorm.io/builder/cond_eq.go create mode 100644 vendor/xorm.io/builder/cond_expr.go create mode 100644 vendor/xorm.io/builder/cond_if.go create mode 100644 vendor/xorm.io/builder/cond_in.go create mode 100644 vendor/xorm.io/builder/cond_like.go create mode 100644 vendor/xorm.io/builder/cond_neq.go create mode 100644 vendor/xorm.io/builder/cond_not.go create mode 100644 vendor/xorm.io/builder/cond_notin.go create mode 100644 vendor/xorm.io/builder/cond_null.go create mode 100644 vendor/xorm.io/builder/cond_or.go create mode 100644 vendor/xorm.io/builder/doc.go create mode 100644 vendor/xorm.io/builder/error.go create mode 100644 vendor/xorm.io/builder/go.mod create mode 100644 vendor/xorm.io/builder/go.sum create mode 100644 vendor/xorm.io/builder/sql.go create mode 100644 vendor/xorm.io/builder/writer.go create mode 100644 vendor/xorm.io/xorm/.changelog.yml create mode 100644 vendor/xorm.io/xorm/.drone.yml rename vendor/{github.com/jmoiron/sqlx => xorm.io/xorm}/.gitignore (65%) create mode 100644 vendor/xorm.io/xorm/.revive.toml create mode 100644 vendor/xorm.io/xorm/CHANGELOG.md create mode 100644 vendor/xorm.io/xorm/CONTRIBUTING.md create mode 100644 vendor/xorm.io/xorm/LICENSE create mode 100644 vendor/xorm.io/xorm/Makefile create mode 100644 vendor/xorm.io/xorm/README.md create mode 100644 vendor/xorm.io/xorm/README_CN.md create mode 100644 vendor/xorm.io/xorm/caches/cache.go create mode 100644 vendor/xorm.io/xorm/caches/encode.go create mode 100644 vendor/xorm.io/xorm/caches/leveldb.go create mode 100644 vendor/xorm.io/xorm/caches/lru.go create mode 100644 vendor/xorm.io/xorm/caches/manager.go create mode 100644 vendor/xorm.io/xorm/caches/memory_store.go create mode 100644 vendor/xorm.io/xorm/contexts/context_cache.go create mode 100644 vendor/xorm.io/xorm/contexts/hook.go create mode 100644 vendor/xorm.io/xorm/convert.go create mode 100644 vendor/xorm.io/xorm/convert/conversion.go create mode 100644 vendor/xorm.io/xorm/core/db.go create mode 100644 vendor/xorm.io/xorm/core/error.go create mode 100644 vendor/xorm.io/xorm/core/interface.go create mode 100644 vendor/xorm.io/xorm/core/rows.go create mode 100644 vendor/xorm.io/xorm/core/scan.go create mode 100644 vendor/xorm.io/xorm/core/stmt.go create mode 100644 vendor/xorm.io/xorm/core/tx.go create mode 100644 vendor/xorm.io/xorm/dialects/dialect.go create mode 100644 vendor/xorm.io/xorm/dialects/driver.go create mode 100644 vendor/xorm.io/xorm/dialects/filter.go create mode 100644 vendor/xorm.io/xorm/dialects/gen_reserved.sh create mode 100644 vendor/xorm.io/xorm/dialects/mssql.go create mode 100644 vendor/xorm.io/xorm/dialects/mysql.go create mode 100644 vendor/xorm.io/xorm/dialects/oracle.go create mode 100644 vendor/xorm.io/xorm/dialects/pg_reserved.txt create mode 100644 vendor/xorm.io/xorm/dialects/postgres.go create mode 100644 
vendor/xorm.io/xorm/dialects/quote.go create mode 100644 vendor/xorm.io/xorm/dialects/sqlite3.go create mode 100644 vendor/xorm.io/xorm/dialects/table_name.go create mode 100644 vendor/xorm.io/xorm/dialects/time.go create mode 100644 vendor/xorm.io/xorm/doc.go create mode 100644 vendor/xorm.io/xorm/engine.go create mode 100644 vendor/xorm.io/xorm/engine_group.go create mode 100644 vendor/xorm.io/xorm/engine_group_policy.go create mode 100644 vendor/xorm.io/xorm/error.go create mode 100644 vendor/xorm.io/xorm/go.mod create mode 100644 vendor/xorm.io/xorm/go.sum create mode 100644 vendor/xorm.io/xorm/interface.go create mode 100644 vendor/xorm.io/xorm/internal/json/json.go create mode 100644 vendor/xorm.io/xorm/internal/statements/cache.go create mode 100644 vendor/xorm.io/xorm/internal/statements/column_map.go create mode 100644 vendor/xorm.io/xorm/internal/statements/expr_param.go create mode 100644 vendor/xorm.io/xorm/internal/statements/insert.go create mode 100644 vendor/xorm.io/xorm/internal/statements/pk.go create mode 100644 vendor/xorm.io/xorm/internal/statements/query.go create mode 100644 vendor/xorm.io/xorm/internal/statements/statement.go create mode 100644 vendor/xorm.io/xorm/internal/statements/statement_args.go create mode 100644 vendor/xorm.io/xorm/internal/statements/update.go create mode 100644 vendor/xorm.io/xorm/internal/statements/values.go create mode 100644 vendor/xorm.io/xorm/internal/utils/name.go create mode 100644 vendor/xorm.io/xorm/internal/utils/reflect.go create mode 100644 vendor/xorm.io/xorm/internal/utils/slice.go create mode 100644 vendor/xorm.io/xorm/internal/utils/sql.go create mode 100644 vendor/xorm.io/xorm/internal/utils/strings.go create mode 100644 vendor/xorm.io/xorm/internal/utils/zero.go create mode 100644 vendor/xorm.io/xorm/log/logger.go create mode 100644 vendor/xorm.io/xorm/log/logger_context.go create mode 100644 vendor/xorm.io/xorm/log/syslogger.go create mode 100644 vendor/xorm.io/xorm/names/mapper.go create mode 100644 vendor/xorm.io/xorm/names/table_name.go create mode 100644 vendor/xorm.io/xorm/processors.go create mode 100644 vendor/xorm.io/xorm/rows.go create mode 100644 vendor/xorm.io/xorm/schemas/column.go create mode 100644 vendor/xorm.io/xorm/schemas/index.go create mode 100644 vendor/xorm.io/xorm/schemas/pk.go create mode 100644 vendor/xorm.io/xorm/schemas/quote.go create mode 100644 vendor/xorm.io/xorm/schemas/table.go create mode 100644 vendor/xorm.io/xorm/schemas/type.go create mode 100644 vendor/xorm.io/xorm/session.go create mode 100644 vendor/xorm.io/xorm/session_cols.go create mode 100644 vendor/xorm.io/xorm/session_cond.go create mode 100644 vendor/xorm.io/xorm/session_convert.go create mode 100644 vendor/xorm.io/xorm/session_delete.go create mode 100644 vendor/xorm.io/xorm/session_exist.go create mode 100644 vendor/xorm.io/xorm/session_find.go create mode 100644 vendor/xorm.io/xorm/session_get.go create mode 100644 vendor/xorm.io/xorm/session_insert.go create mode 100644 vendor/xorm.io/xorm/session_iterate.go create mode 100644 vendor/xorm.io/xorm/session_query.go create mode 100644 vendor/xorm.io/xorm/session_raw.go create mode 100644 vendor/xorm.io/xorm/session_schema.go create mode 100644 vendor/xorm.io/xorm/session_stats.go create mode 100644 vendor/xorm.io/xorm/session_tx.go create mode 100644 vendor/xorm.io/xorm/session_update.go create mode 100644 vendor/xorm.io/xorm/tags/parser.go create mode 100644 vendor/xorm.io/xorm/tags/tag.go diff --git a/cmd/coronafana/coronafana.go b/cmd/coronafana/coronafana.go index 
524f4a4..ff973a8 100644
--- a/cmd/coronafana/coronafana.go
+++ b/cmd/coronafana/coronafana.go
@@ -7,7 +7,8 @@ import (
 	"git.paulbsd.com/paulbsd/coronafana/src/config"
 	"git.paulbsd.com/paulbsd/coronafana/src/coronafana"
 	_ "github.com/go-sql-driver/mysql"
-	"github.com/jmoiron/sqlx"
+	"xorm.io/xorm"
+	"xorm.io/xorm/names"
 )
 
 func main() {
@@ -20,7 +21,7 @@ func main() {
 		log.Fatalln(err)
 	}
 
-	db, err := sqlx.Connect("mysql",
+	db, err := xorm.NewEngine("mysql",
 		fmt.Sprintf("%s:%s@tcp(%s)/%s",
 			cfg.DbUsername,
 			cfg.DbPassword,
@@ -29,15 +30,15 @@ func main() {
 	if err != nil {
 		log.Fatalln(err)
 	}
+	defer db.Close()
 
-	_, err = db.Exec(cfg.DbSchemaGlobal)
-	if err != nil {
-		log.Fatalln(err)
-	}
+	db.SetMapper(names.GonicMapper{})
+	db.ShowSQL(cfg.Debug)
 
-	_, err = db.Exec(cfg.DbSchemaPays)
-	if err != nil {
-		log.Fatalln(err)
+	for _, object := range []interface{}{new(coronafana.Coronaglobaldata), new(coronafana.Coronapaysdata)} {
+		if err = db.Sync2(object); err != nil {
+			log.Fatalln(err)
+		}
 	}
 
 	cr, err = coronafana.GetData(cfg)
@@ -45,21 +46,9 @@ func main() {
 		log.Fatalln(err)
 	}
 
-	err = cr.GetMaxDates(cfg, *db)
+	// Process data
+	err = cr.InsertData(cfg, *db)
 	if err != nil {
 		log.Fatalln(err)
 	}
-
-	// Processes data for 'Global'
-	err = cr.InsertGlobalData(cfg, *db)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// Processes data for 'Pays'
-	err = cr.InsertPaysData(cfg, *db)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
 }
diff --git a/go.mod b/go.mod
index 08b4f8b..51c509b 100644
--- a/go.mod
+++ b/go.mod
@@ -4,8 +4,9 @@ go 1.14
 
 require (
 	github.com/go-sql-driver/mysql v1.5.0
-	github.com/jinzhu/gorm v1.9.12
-	github.com/jmoiron/sqlx v1.2.0
+	github.com/jinzhu/gorm v1.9.12 // indirect
+	github.com/jmoiron/sqlx v1.2.0 // indirect
 	github.com/smartystreets/goconvey v1.6.4 // indirect
 	gopkg.in/ini.v1 v1.54.0
+	xorm.io/xorm v1.0.4
 )
diff --git a/go.sum b/go.sum
index 85cf4d4..24c77b0 100644
--- a/go.sum
+++ b/go.sum
@@ -1,7 +1,13 @@
+gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:EXuID2Zs0pAQhH8yz+DNjUbjppKQzKFAn28TMYPB6IU=
+github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
+github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd h1:83Wprp6ROGeiHFAP8WJdI2RoxALQYgdllERc3N5N2DM=
 github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
+github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
 github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y=
 github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
@@ -9,8 +15,11 @@ github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LB
 github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
 github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/jinzhu/gorm v1.9.12 h1:Drgk1clyWT9t9ERbzHza6Mj/8FY/CqMyVzOiHviMo6Q=
 github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs=
 github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
@@ -24,25 +33,53 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV
 github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lib/pq v1.1.1 h1:sJZmqHoEaY7f+NPP8pgLB/WxulyR3fewgCM2qaSlBb4=
 github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus=
 github.com/mattn/go-sqlite3 v2.0.1+incompatible h1:xQ15muvnzGBHpIpdrNi1DA5x0+TcBZzsIDwmw9uTHzw=
 github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
+github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
+github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd h1:GGJVjV8waZKRHrgwvtH66z9ZGVurTD1MT0n1Bb+q4aM=
 golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
 gopkg.in/ini.v1 v1.54.0 h1:oM5ElzbIi7gwLnNbPX2M25ED1vSAK3B6dex50eS/6Fs=
 gopkg.in/ini.v1 v1.54.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+xorm.io/builder v0.3.7 h1:2pETdKRK+2QG4mLX4oODHEhn5Z8j1m8sXa7jfu+/SZI=
+xorm.io/builder v0.3.7/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE=
+xorm.io/xorm v1.0.4 h1:UBXA4I3NhiyjXfPqxXUkS2t5hMta9SSPATeMMaZg9oA=
+xorm.io/xorm v1.0.4/go.mod h1:uF9EtbhODq5kNWxMbnBEj8hRRZnlcNSz2t2N7HW/+A4=
diff --git a/src/config/main.go b/src/config/main.go
index cd9c5a7..9dd8100 100644
--- a/src/config/main.go
+++ b/src/config/main.go
@@ -10,10 +10,12 @@ import (
 // GetConfig fetch configuration
 func (config *Config) GetConfig() (err error) {
 	var configfile string
+	var debug bool
 
 	flag.Usage = utils.Usage
 	flag.StringVar(&configfile, "configfile", "coronafana.ini", "Config file to use with coronafana section")
+	flag.BoolVar(&debug, "debug", false, "Set debug flag")
 	flag.Parse()
 
 	cfg, err := ini.Load(configfile)
@@ -22,6 +24,7 @@ func (config *Config) GetConfig() (err error) {
 	}
 
 	*config = Config{
+		Debug:      debug,
 		DbHostname: "localhost",
 		DbName:     "database",
 		DbUsername: "username",
@@ -35,58 +38,11 @@ func (config *Config) GetConfig() (err error) {
 		return
 	}
 
-	config.DbSchemaGlobal = `
-	CREATE TABLE IF NOT EXISTS coronaglobaldata
-	(
-		id int(8) NOT NULL AUTO_INCREMENT,
-		date datetime NOT NULL,
-		infection int(8) DEFAULT NULL,
-		deces int(8) DEFAULT NULL,
-		guerisons varchar(50) DEFAULT NULL,
-		tauxdeces float(10) DEFAULT NULL,
-		tauxguerison float(10) DEFAULT NULL,
-		tauxinfection float(10) DEFAULT NULL,
-		PRIMARY KEY (id),
-		UNIQUE KEY date (date) USING BTREE
-	)
-	ENGINE=InnoDB DEFAULT CHARSET=utf8;
-	`
-
-	config.DbInsertGlobal = `
-	INSERT INTO coronaglobaldata (date, infection, deces, guerisons, tauxdeces, tauxguerison, tauxinfection)
-	VALUES (:date, :infection, :deces, :guerisons, :tauxdeces, :tauxguerison, :tauxinfection)
-	ON DUPLICATE KEY UPDATE date=:date,infection=:infection, deces=:deces, guerisons=:guerisons, tauxdeces=:tauxdeces, tauxguerison=:tauxguerison, tauxinfection=:tauxinfection;
-	`
-
 	config.DbMaxDateGlobal = `
-	SELECT IFNULL(SUBDATE(MAX(date), INTERVAL 3 DAY), "0001-01-01 00:00:00")
+	SELECT IFNULL(SUBDATE(MAX(date), INTERVAL 2 DAY), "0001-01-01 00:00:00")
 	FROM coronaglobaldata;
 	`
 
-	config.DbSchemaPays = `
-	CREATE TABLE IF NOT EXISTS coronapaysdata
-	(
-		id int(8) NOT NULL AUTO_INCREMENT,
-		date datetime NOT NULL,
-		pays varchar(50) NOT NULL,
-		infection int(8) DEFAULT NULL,
-		deces int(8) DEFAULT NULL,
-		guerisons varchar(50) DEFAULT NULL,
-		tauxdeces float(10) DEFAULT NULL,
-		tauxguerison float(10) DEFAULT NULL,
-		tauxinfection float(10) DEFAULT NULL,
-		PRIMARY KEY (id),
-		UNIQUE KEY date (date,pays) USING BTREE
-	)
-	ENGINE=InnoDB DEFAULT CHARSET=utf8;
-	`
-
-	config.DbInsertPays = `
-	INSERT INTO coronapaysdata (date, pays, infection, deces, guerisons, tauxdeces, tauxguerison, tauxinfection)
-	VALUES (:date, :pays, :infection, :deces, :guerisons, :tauxdeces, :tauxguerison, :tauxinfection)
-	ON DUPLICATE KEY UPDATE date=:date, pays=:pays,infection=:infection, deces=:deces, guerisons=:guerisons, tauxdeces=:tauxdeces, tauxguerison=:tauxguerison, tauxinfection=:tauxinfection;
-	`
-
 	config.DbMaxDatePays = `
 	SELECT IFNULL(SUBDATE(MAX(date), INTERVAL 3 DAY), "0001-01-01 00:00:00")
 	FROM coronapaysdata;
@@ -97,16 +53,13 @@ func (config *Config) GetConfig() (err error) {
 
 // Config is the global config of g2g
 type Config struct {
+	CoronaURL       string
+	Debug           bool
 	DbHostname      string `ini:"db_hostname"`
 	DbName          string `ini:"db_name"`
 	DbUsername      string `ini:"db_username"`
 	DbPassword      string `ini:"db_password"`
 	DbTable         string `ini:"db_table"`
-	DbSchemaGlobal  string
-	DbInsertGlobal  string
 	DbMaxDateGlobal string
-	DbSchemaPays    string
-	DbInsertPays    string
 	DbMaxDatePays   string
-	CoronaURL       string
 }
diff --git a/src/coronafana/main.go b/src/coronafana/main.go
index bd64702..25dddeb 100644
--- a/src/coronafana/main.go
+++ b/src/coronafana/main.go
@@ -10,8 +10,7 @@ import (
 	"time"
 
 	"git.paulbsd.com/paulbsd/coronafana/src/config"
-	"github.com/jinzhu/gorm"
-	"github.com/jmoiron/sqlx"
+	"xorm.io/xorm"
 )
 
 // GetData fetch data from open data portal
@@ -46,20 +45,49 @@ func GetData(cfg config.Config) (cr Coronafana, err error) {
 	return
 }
 
-// InsertGlobalData insert data to MySQL / MariaDB
-func (cr Coronafana) InsertGlobalData(cfg config.Config, db sqlx.DB) (err error) {
-	var tx *sqlx.Tx
+// InsertData inserts data into MySQL / MariaDB
+func (cr Coronafana) InsertData(cfg config.Config, db xorm.Engine) (err error) {
+	var tx *xorm.Session
 	var i int
+	var cgd []Coronaglobaldata
+	var cpd []Coronapaysdata
 
+	tx = db.NewSession()
+	defer tx.Close()
+
+	log.Println("Getting max dates")
+
+	err = db.Desc("date").Limit(1, 3).Find(&cgd)
+	if err != nil {
+		return
+	}
+	var globalparseddate time.Time
+	if len(cgd) > 0 {
+		globalparseddate = cgd[0].Date
+	}
+
+	err = db.Desc("date").Limit(1, 3).Find(&cpd)
+	if err != nil {
+		return
+	}
+	var paysparseddate time.Time
+	if len(cpd) > 0 {
+		paysparseddate = cpd[0].Date
+	}
+
+	if err = tx.Begin(); err != nil {
+		return
+	}
+
+	i = 0
 	log.Println("Start inserting global data ...")
-	tx = db.MustBegin()
-	txStmtglobal, err := tx.PrepareNamed(cfg.DbInsertGlobal)
-
-	for _, dt := range cr.GlobalData {
-		if t, _ := GetParsedDate(dt.Date); t.After(cr.MaxDateGlobal) {
-			_, err = txStmtglobal.Exec(&dt)
+	for _, dt := range cr.Coronaglobaldata {
+		if dt.Date, err = GetParsedDate(dt.DateString); err == nil && dt.Date.After(globalparseddate) {
+			_, err = tx.Insert(&dt)
 			if err != nil {
-				return
+				if !strings.Contains(err.Error(), "Error 1062") {
+					return
+				}
 			}
 			i++
 		}
@@ -71,30 +99,20 @@ func (cr Coronafana) InsertGlobalData(cfg config.Config, db sqlx.DB) (err error)
 	}
 
 	if i > 0 {
-		log.Println(fmt.Sprintf("%d global entries to inserted", i))
+		log.Println(fmt.Sprintf("%d entries inserted", i))
 	} else {
 		log.Println("No entry inserted")
 	}
 
-	return
-}
-
-// InsertPaysData insert data to MySQL / MariaDB
-func (cr Coronafana) InsertPaysData(cfg config.Config, db sqlx.DB) (err error) {
-	var tx *sqlx.Tx
-	var i int
-
-	log.Println("Start inserting country data ...")
-	tx = db.MustBegin()
-	cr.ReplacePaysQuotes()
-
-	txStmtpays, err := tx.PrepareNamed(cfg.DbInsertPays)
-
-	for _, dt := range cr.PaysData {
-		if t, _ := GetParsedDate(dt.Date); t.After(cr.MaxDatePays) {
-			_, err = txStmtpays.Exec(&dt)
+	i = 0
+	log.Println("Start inserting pays data ...")
+	for _, dt := range cr.Coronapaysdata {
+		if dt.Date, err = GetParsedDate(dt.DateString); err == nil && dt.Date.After(paysparseddate) {
+			_, err = tx.Insert(&dt)
 			if err != nil {
-				return
+				if !strings.Contains(err.Error(), "Error 1062") {
+					return
+				}
 			}
 			i++
 		}
@@ -106,39 +124,10 @@ func (cr Coronafana) InsertPaysData(cfg config.Config, db sqlx.DB) (err error) {
 	}
 
 	if i > 0 {
-		log.Println(fmt.Sprintf("%d global entries to inserted", i))
+		log.Println(fmt.Sprintf("%d entries inserted", i))
 	} else {
 		log.Println("No entry inserted")
 	}
-
-	return
-}
-
-// ReplacePaysQuotes escapes quotes in Pays names
-func (cr *Coronafana) ReplacePaysQuotes() (err error) {
-	for _, pdata := range cr.PaysData {
-		pdata.Pays = strings.Replace(pdata.Pays, `'`, `\'`, -1)
-	}
-
-	return
-}
-
-// GetMaxDates get max dates on table
-func (cr *Coronafana) GetMaxDates(cfg config.Config, db sqlx.DB) (err error) {
-	var maxDateGlobal, maxDatePays string
-
-	err = db.Get(&maxDateGlobal, cfg.DbMaxDateGlobal)
-	if err != nil {
-		return
-	}
-
-	err = db.Get(&maxDatePays, cfg.DbMaxDatePays)
-
-	if err != nil {
-		return
-	}
-
-	cr.MaxDateGlobal, err = time.Parse("2006-01-02 15:04:05", maxDateGlobal)
-	cr.MaxDatePays, err = time.Parse("2006-01-02 15:04:05", maxDatePays)
 
 	return
 }
@@ -151,29 +140,42 @@ func GetParsedDate(date string) (res time.Time, err error) {
 
 // Coronafana is the main struct
 type Coronafana struct {
-	gorm.Model
-	Source        string `json:"Source"`
-	Information   string `json:"Information"`
-	Utilisation   string `json:"Utilisation"`
-	MaxDateGlobal time.Time
-	MaxDatePays   time.Time
-	GlobalData    []struct {
-		Date          string  `json:"Date" gorm:"primary_key" db:"date"`
-		Infection     int     `json:"Infection" db:"infection"`
-		Deces         int     `json:"Deces" db:"deces"`
-		Guerisons     int     `json:"Guerisons" db:"guerisons"`
-		TauxDeces     float64 `json:"TauxDeces" db:"tauxdeces"`
-		TauxGuerison  float64 `json:"TauxGuerison" db:"tauxguerison"`
-		TauxInfection float64 `json:"TauxInfection" db:"tauxinfection"`
-	} `json:"GlobalData"`
-	PaysData []struct {
-		Date          string  `json:"Date" gorm:"primary_key" db:"date"`
-		Pays          string  `json:"Pays" db:"pays"`
-		Infection     int     `json:"Infection" db:"infection"`
-		Deces         int     `json:"Deces" db:"deces"`
-		Guerisons     int     `json:"Guerisons" db:"guerisons"`
-		TauxDeces     float64 `json:"TauxDeces" db:"tauxdeces"`
-		TauxGuerison  float64 `json:"TauxGuerison" db:"tauxguerison"`
-		TauxInfection float64 `json:"TauxInfection" db:"tauxinfection"`
-	} `json:"PaysData"`
+	Source           string             `json:"Source"`
+	Information      string             `json:"Information"`
+	Utilisation      string             `json:"Utilisation"`
+	MaxDateGlobal    time.Time
+	MaxDatePays      time.Time
+	Coronaglobaldata []Coronaglobaldata `json:"GlobalData"`
+	Coronapaysdata   []Coronapaysdata   `json:"PaysData"`
+}
+
+// Coronaglobaldata relates to global data
+type Coronaglobaldata struct {
+	ID            int       `xorm:"not null pk autoincr INT(8)"`
+	Date          time.Time `xorm:"not null unique(date) DATETIME"`
+	Infection     int       `json:"Infection" xorm:"default NULL INT(8)"`
+	Deces         int       `json:"Deces" xorm:"default NULL INT(8)"`
+	Guerisons     int       `json:"Guerisons" xorm:"default NULL INT(8)"`
+	Tauxdeces     float64   `json:"TauxDeces" xorm:"default NULL FLOAT"`
+	Tauxguerison  float64   `json:"TauxGuerison" xorm:"default NULL FLOAT"`
+	Tauxinfection float64   `json:"TauxInfection" xorm:"default NULL FLOAT"`
+	DateString    string    `json:"Date" xorm:"-"`
+}
+
+// Coronapaysdata relates to country data
+type Coronapaysdata struct {
+	ID            int       `xorm:"not null pk autoincr INT(8)"`
+	Date          time.Time `xorm:"not null unique(date) DATETIME"`
+	Pays          string    `json:"Pays" xorm:"not null unique(date) VARCHAR(50)"`
+	Infection     int       `json:"Infection" xorm:"default NULL INT(8)"`
+	Deces         int       `json:"Deces" xorm:"default NULL INT(8)"`
+	Guerisons     int       `json:"Guerisons" xorm:"default NULL INT(8)"`
+	Tauxdeces     float64   `json:"TauxDeces" xorm:"default NULL FLOAT"`
+	Tauxguerison  float64   `json:"TauxGuerison" xorm:"default NULL FLOAT"`
+	Tauxinfection float64   `json:"TauxInfection" xorm:"default NULL FLOAT"`
+	DateString    string    `json:"Date" xorm:"-"`
+}
+
+type calc interface {
+	GetMaxDate() time.Time
 }
diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore
new file mode 100644
index 0000000..042091d
--- /dev/null
+++ b/vendor/github.com/golang/snappy/.gitignore
@@ -0,0 +1,16 @@
+cmd/snappytool/snappytool
+testdata/bench
+
+# These explicitly listed benchmark data files are for an obsolete version of
+# snappy_test.go.
+testdata/alice29.txt
+testdata/asyoulik.txt
+testdata/fireworks.jpeg
+testdata/geo.protodata
+testdata/html
+testdata/html_x_4
+testdata/kppkn.gtb
+testdata/lcet10.txt
+testdata/paper-100k.pdf
+testdata/plrabn12.txt
+testdata/urls.10K
diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS
new file mode 100644
index 0000000..bcfa195
--- /dev/null
+++ b/vendor/github.com/golang/snappy/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+#	Name or Organization
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Damian Gryski
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Rodolfo Carvalho
+Sebastien Binet
diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS
new file mode 100644
index 0000000..931ae31
--- /dev/null
+++ b/vendor/github.com/golang/snappy/CONTRIBUTORS
@@ -0,0 +1,37 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Damian Gryski +Jan Mercl <0xjnml@gmail.com> +Kai Backman +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE new file mode 100644 index 0000000..6050c10 --- /dev/null +++ b/vendor/github.com/golang/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README new file mode 100644 index 0000000..cea1287 --- /dev/null +++ b/vendor/github.com/golang/snappy/README @@ -0,0 +1,107 @@ +The Snappy compression format in the Go programming language. + +To download and install from source: +$ go get github.com/golang/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. + + + +Benchmarks. + +The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten +or so files, the same set used by the C++ Snappy code (github.com/google/snappy +and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ +3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: + +"go test -test.bench=." 
+ +_UFlat0-8 2.19GB/s ± 0% html +_UFlat1-8 1.41GB/s ± 0% urls +_UFlat2-8 23.5GB/s ± 2% jpg +_UFlat3-8 1.91GB/s ± 0% jpg_200 +_UFlat4-8 14.0GB/s ± 1% pdf +_UFlat5-8 1.97GB/s ± 0% html4 +_UFlat6-8 814MB/s ± 0% txt1 +_UFlat7-8 785MB/s ± 0% txt2 +_UFlat8-8 857MB/s ± 0% txt3 +_UFlat9-8 719MB/s ± 1% txt4 +_UFlat10-8 2.84GB/s ± 0% pb +_UFlat11-8 1.05GB/s ± 0% gaviota + +_ZFlat0-8 1.04GB/s ± 0% html +_ZFlat1-8 534MB/s ± 0% urls +_ZFlat2-8 15.7GB/s ± 1% jpg +_ZFlat3-8 740MB/s ± 3% jpg_200 +_ZFlat4-8 9.20GB/s ± 1% pdf +_ZFlat5-8 991MB/s ± 0% html4 +_ZFlat6-8 379MB/s ± 0% txt1 +_ZFlat7-8 352MB/s ± 0% txt2 +_ZFlat8-8 396MB/s ± 1% txt3 +_ZFlat9-8 327MB/s ± 1% txt4 +_ZFlat10-8 1.33GB/s ± 1% pb +_ZFlat11-8 605MB/s ± 1% gaviota + + + +"go test -test.bench=. -tags=noasm" + +_UFlat0-8 621MB/s ± 2% html +_UFlat1-8 494MB/s ± 1% urls +_UFlat2-8 23.2GB/s ± 1% jpg +_UFlat3-8 1.12GB/s ± 1% jpg_200 +_UFlat4-8 4.35GB/s ± 1% pdf +_UFlat5-8 609MB/s ± 0% html4 +_UFlat6-8 296MB/s ± 0% txt1 +_UFlat7-8 288MB/s ± 0% txt2 +_UFlat8-8 309MB/s ± 1% txt3 +_UFlat9-8 280MB/s ± 1% txt4 +_UFlat10-8 753MB/s ± 0% pb +_UFlat11-8 400MB/s ± 0% gaviota + +_ZFlat0-8 409MB/s ± 1% html +_ZFlat1-8 250MB/s ± 1% urls +_ZFlat2-8 12.3GB/s ± 1% jpg +_ZFlat3-8 132MB/s ± 0% jpg_200 +_ZFlat4-8 2.92GB/s ± 0% pdf +_ZFlat5-8 405MB/s ± 1% html4 +_ZFlat6-8 179MB/s ± 1% txt1 +_ZFlat7-8 170MB/s ± 1% txt2 +_ZFlat8-8 189MB/s ± 1% txt3 +_ZFlat9-8 164MB/s ± 1% txt4 +_ZFlat10-8 479MB/s ± 1% pb +_ZFlat11-8 270MB/s ± 1% gaviota + + + +For comparison (Go's encoded output is byte-for-byte identical to C++'s), here +are the numbers from C++ Snappy's + +make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log + +BM_UFlat/0 2.4GB/s html +BM_UFlat/1 1.4GB/s urls +BM_UFlat/2 21.8GB/s jpg +BM_UFlat/3 1.5GB/s jpg_200 +BM_UFlat/4 13.3GB/s pdf +BM_UFlat/5 2.1GB/s html4 +BM_UFlat/6 1.0GB/s txt1 +BM_UFlat/7 959.4MB/s txt2 +BM_UFlat/8 1.0GB/s txt3 +BM_UFlat/9 864.5MB/s txt4 +BM_UFlat/10 2.9GB/s pb +BM_UFlat/11 1.2GB/s gaviota + +BM_ZFlat/0 944.3MB/s html (22.31 %) +BM_ZFlat/1 501.6MB/s urls (47.78 %) +BM_ZFlat/2 14.3GB/s jpg (99.95 %) +BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) +BM_ZFlat/4 8.3GB/s pdf (83.30 %) +BM_ZFlat/5 903.5MB/s html4 (22.52 %) +BM_ZFlat/6 336.0MB/s txt1 (57.88 %) +BM_ZFlat/7 312.3MB/s txt2 (61.91 %) +BM_ZFlat/8 353.1MB/s txt3 (54.99 %) +BM_ZFlat/9 289.9MB/s txt4 (66.26 %) +BM_ZFlat/10 1.2GB/s pb (19.68 %) +BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go new file mode 100644 index 0000000..72efb03 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. 
+func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
+ if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go new file mode 100644 index 0000000..fcd192b --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s new file mode 100644 index 0000000..e6179f6 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.s @@ -0,0 +1,490 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. 
The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. 
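+	// (MOVOU assembles to an unaligned 128-bit move, MOVDQU, so the 16-byte
+	// copy below carries no alignment requirement.)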
+ MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ CX, SI + SUBQ $58, SI + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. 
+	//
+	// We assume that:
+	//	- CX == length && CX > 0
+	//	- DX == offset
+
+	// if offset <= 0 { etc }
+	CMPQ DX, $0
+	JLE  errCorrupt
+
+	// if d < offset { etc }
+	MOVQ DI, BX
+	SUBQ R8, BX
+	CMPQ BX, DX
+	JLT  errCorrupt
+
+	// if length > len(dst)-d { etc }
+	MOVQ R10, BX
+	SUBQ DI, BX
+	CMPQ CX, BX
+	JGT  errCorrupt
+
+	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+	//
+	// Set:
+	//	- R14 = len(dst)-d
+	//	- R15 = &dst[d-offset]
+	MOVQ R10, R14
+	SUBQ DI, R14
+	MOVQ DI, R15
+	SUBQ DX, R15
+
+	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+	//
+	// First, try using two 8-byte load/stores, similar to the doLit technique
+	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+	// and not one 16-byte load/store, and the first store has to be before the
+	// second load, due to the overlap if offset is in the range [8, 16).
+	//
+	// if length > 16 || offset < 8 || len(dst)-d < 16 {
+	//   goto slowForwardCopy
+	// }
+	// copy 16 bytes
+	// d += length
+	CMPQ CX, $16
+	JGT  slowForwardCopy
+	CMPQ DX, $8
+	JLT  slowForwardCopy
+	CMPQ R14, $16
+	JLT  slowForwardCopy
+	MOVQ 0(R15), AX
+	MOVQ AX, 0(DI)
+	MOVQ 8(R15), BX
+	MOVQ BX, 8(DI)
+	ADDQ CX, DI
+	JMP  loop
+
+slowForwardCopy:
+	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+	// can still try 8-byte load stores, provided we can overrun up to 10 extra
+	// bytes. As above, the overrun will be fixed up by subsequent iterations
+	// of the outermost loop.
+	//
+	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+	// commentary says:
+	//
+	// ----
+	//
+	// The main part of this loop is a simple copy of eight bytes at a time
+	// until we've copied (at least) the requested amount of bytes. However,
+	// if d and d-offset are less than eight bytes apart (indicating a
+	// repeating pattern of length < 8), we first need to expand the pattern in
+	// order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+	//
+	//	abxxxxxxxxxxxx
+	//	[------]           d-offset
+	//	  [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//	ababxxxxxxxxxx
+	//	[------]           d-offset
+	//	    [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+	//
+	// This allows us to do very well in the special case of one single byte
+	// repeated many times, without taking a big hit for more general cases.
+	//
+	// The worst case of extra writing past the end of the match occurs when
+	// offset == 1 and length == 1; the last copy will read from byte positions
+	// [0..7] and write to [4..11], whereas it was only supposed to write to
+	// position 1. Thus, ten excess bytes.
+	//
+	// ----
+	//
+	// That "10 byte overrun" worst case is confirmed by Go's
+	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+	// and finishSlowForwardCopy algorithm.
+	//
+	// if length > len(dst)-d-10 {
+	//   goto verySlowForwardCopy
+	// }
+	SUBQ $10, R14
+	CMPQ CX, R14
+	JGT  verySlowForwardCopy
+
+makeOffsetAtLeast8:
+	// !!! As above, expand the pattern so that offset >= 8 and we can use
+	// 8-byte load/stores.
+ // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go new file mode 100644 index 0000000..8c9f204 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_other.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike + // the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + for end := d + length; d != end; d++ { + dst[d] = dst[d-offset] + } + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go new file mode 100644 index 0000000..8d393e9 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. 
That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. 
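+//
+// A minimal sketch of the recommended pattern, where dst is the destination
+// io.Writer and p holds the uncompressed bytes:
+//
+//	w := NewBufferedWriter(dst)
+//	if _, err := w.Write(p); err != nil {
+//		// handle the error
+//	}
+//	if err := w.Close(); err != nil {
+//		// handle the error
+//	}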
+func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. 
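+		// The header layout, as written below: one chunk-type byte, a
+		// little-endian uint24 chunk length (which includes the checksum),
+		// then the little-endian masked CRC-32C of the uncompressed data.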
+ w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go new file mode 100644 index 0000000..150d91b --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. +// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s new file mode 100644 index 0000000..adfd979 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. 
The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. 
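+//
+// Semantics match extendMatch in encode_other.go: for example, with
+// src = []byte("abcabcabc"), i = 0 and j = 3, the match runs to the end of
+// the input and the function returns 9.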
+TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. 
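+	// (The stack offsets used below, 56, 64 and 88, match the spill map given
+	// in the register-allocation comment at the top of this function.)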
+ MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. + // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. 
This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. 
+ + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. + MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go new file mode 100644 index 0000000..dbcae90 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. 
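+//
+// (A literal tag byte stores len(lit)-1 in its upper six bits when len(lit)
+// is at most 60; longer literals spill len(lit)-1 into one or two trailing
+// little-endian bytes, as the switch below shows.)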
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+	i, n := 0, uint(len(lit)-1)
+	switch {
+	case n < 60:
+		dst[0] = uint8(n)<<2 | tagLiteral
+		i = 1
+	case n < 1<<8:
+		dst[0] = 60<<2 | tagLiteral
+		dst[1] = uint8(n)
+		i = 2
+	default:
+		dst[0] = 61<<2 | tagLiteral
+		dst[1] = uint8(n)
+		dst[2] = uint8(n >> 8)
+		i = 3
+	}
+	return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= 65535
+//	4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+	i := 0
+	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+	for length >= 68 {
+		// Emit a length 64 copy, encoded as 3 bytes.
+		dst[i+0] = 63<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 64
+	}
+	if length > 64 {
+		// Emit a length 60 copy, encoded as 3 bytes.
+		dst[i+0] = 59<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 60
+	}
+	if length >= 12 || offset >= 2048 {
+		// Emit the remaining copy, encoded as 3 bytes.
+		dst[i+0] = uint8(length-1)<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		return i + 3
+	}
+	// Emit the remaining copy, encoded as 2 bytes.
+	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+	dst[i+1] = uint8(offset)
+	return i + 2
+}
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+//	0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+	}
+	return j
+}
+
+func hash(u, shift uint32) uint32 {
+	return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src)) &&
+//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+	// The table element type is uint16, as s < sLimit and sLimit < len(src)
+	// and len(src) <= maxBlockSize and maxBlockSize == 65536.
+	const (
+		maxTableSize = 1 << 14
+		// tableMask is redundant, but helps the compiler eliminate bounds
+		// checks.
+		tableMask = maxTableSize - 1
+	)
+	shift := uint32(32 - 8)
+	for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+		shift--
+	}
+	// In Go, all array elements are zero-initialized, so there is no advantage
+	// to a smaller tableSize per se. However, it matches the C++ algorithm,
+	// and in the asm versions of this code, we can get away with zeroing only
+	// the first tableSize elements.
+	var table [maxTableSize]uint16
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := len(src) - inputMargin
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := 0
+
+	// The encoded form must start with a literal, as there are no previous
+	// bytes to copy, so we start looking for hash matches at s == 1.
+	s := 1
+	nextHash := hash(load32(src, s), shift)
+
+	for {
+		// Copied from the C++ snappy implementation:
+		//
+		// Heuristic match skipping: If 32 bytes are scanned with no matches
+		// found, start looking only at every other byte. If 32 more bytes are
+		// scanned (or skipped), look at every third byte, etc. When a match
+		// is found, immediately go back to looking at every byte. This is a
+		// small loss (~5% performance, ~0.1% density) for compressible data
+		// due to more bookkeeping, but for non-compressible data (such as
+		// JPEG) it's a huge win since the compressor quickly "realizes" the
+		// data is incompressible and doesn't bother looking for matches
+		// everywhere.
+		//
+		// The "skip" variable keeps track of how many bytes there are since
+		// the last match; dividing it by 32 (i.e. right-shifting by five)
+		// gives the number of bytes to move ahead for each iteration.
+		skip := 32
+
+		nextS := s
+		candidate := 0
+		for {
+			s = nextS
+			bytesBetweenHashLookups := skip >> 5
+			nextS = s + bytesBetweenHashLookups
+			skip += bytesBetweenHashLookups
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			candidate = int(table[nextHash&tableMask])
+			table[nextHash&tableMask] = uint16(s)
+			nextHash = hash(load32(src, nextS), shift)
+			if load32(src, s) == load32(src, candidate) {
+				break
+			}
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+		d += emitLiteral(dst[d:], src[nextEmit:s])
+
+		// Call emitCopy, and then see if another emitCopy could be our next
+		// move. Repeat until we find no match for the input immediately after
+		// what was consumed by the last emitCopy call.
+		//
+		// If we exit this loop normally then we need to call emitLiteral next,
+		// though we don't yet know how big the literal will be. We handle that
+		// by proceeding to the next iteration of the main loop. We also can
+		// exit this loop via goto if we get close to exhausting the input.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+			base := s
+
+			// Extend the 4-byte match as long as possible.
+			//
+			// This is an inlined version of:
+			//	s = extendMatch(src, candidate+4, s+4)
+			s += 4
+			for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
+			}
+
+			d += emitCopy(dst[d:], base-candidate, s-base)
+			nextEmit = s
+			if s >= sLimit {
+				goto emitRemainder
+			}
+
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-1 and at s. If
+			// another emitCopy is not our next move, also calculate nextHash
+			// at s+1. At least on GOARCH=amd64, these three hash calculations
+			// are faster as one load64 call (with some shifts) instead of
+			// three load32 calls.
+ x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go new file mode 100644 index 0000000..ece692e --- /dev/null +++ b/vendor/github.com/golang/snappy/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy // import "github.com/golang/snappy" + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. 
+ // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/jinzhu/gorm/.gitignore b/vendor/github.com/jinzhu/gorm/.gitignore deleted file mode 100644 index 117f92f..0000000 --- a/vendor/github.com/jinzhu/gorm/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -documents -coverage.txt -_book diff --git a/vendor/github.com/jinzhu/gorm/License b/vendor/github.com/jinzhu/gorm/License deleted file mode 100644 index 037e165..0000000 --- a/vendor/github.com/jinzhu/gorm/License +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013-NOW Jinzhu - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/jinzhu/gorm/README.md b/vendor/github.com/jinzhu/gorm/README.md deleted file mode 100644 index 6d23110..0000000 --- a/vendor/github.com/jinzhu/gorm/README.md +++ /dev/null @@ -1,41 +0,0 @@ -# GORM - -The fantastic ORM library for Golang, aims to be developer friendly. 
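(On the snappy side added above: the crc helper applies the framing format's checksum masking, a Castagnoli CRC-32 rotated right by 15 bits plus a constant. A standalone sketch, using crc32.Checksum, which is equivalent to the crc32.Update(0, table, b) call in the vendored file:)

    package main

    import (
        "fmt"
        "hash/crc32"
    )

    // mask mirrors the crc helper in snappy.go above: Castagnoli CRC-32,
    // rotated right by 15 bits, plus the framing format's magic constant.
    func mask(b []byte) uint32 {
        c := crc32.Checksum(b, crc32.MakeTable(crc32.Castagnoli))
        return uint32(c>>15|c<<17) + 0xa282ead8
    }

    func main() {
        fmt.Printf("%#08x\n", mask([]byte("sNaPpY")))
    }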
- -[![go report card](https://goreportcard.com/badge/github.com/jinzhu/gorm "go report card")](https://goreportcard.com/report/github.com/jinzhu/gorm) -[![wercker status](https://app.wercker.com/status/8596cace912c9947dd9c8542ecc8cb8b/s/master "wercker status")](https://app.wercker.com/project/byKey/8596cace912c9947dd9c8542ecc8cb8b) -[![codecov](https://codecov.io/gh/jinzhu/gorm/branch/master/graph/badge.svg)](https://codecov.io/gh/jinzhu/gorm) -[![Join the chat at https://gitter.im/jinzhu/gorm](https://img.shields.io/gitter/room/jinzhu/gorm.svg)](https://gitter.im/jinzhu/gorm?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![Open Collective Backer](https://opencollective.com/gorm/tiers/backer/badge.svg?label=backer&color=brightgreen "Open Collective Backer")](https://opencollective.com/gorm) -[![Open Collective Sponsor](https://opencollective.com/gorm/tiers/sponsor/badge.svg?label=sponsor&color=brightgreen "Open Collective Sponsor")](https://opencollective.com/gorm) -[![MIT license](https://img.shields.io/badge/license-MIT-brightgreen.svg)](https://opensource.org/licenses/MIT) -[![GoDoc](https://godoc.org/github.com/jinzhu/gorm?status.svg)](https://godoc.org/github.com/jinzhu/gorm) - -## Overview - -* Full-Featured ORM (almost) -* Associations (Has One, Has Many, Belongs To, Many To Many, Polymorphism) -* Hooks (Before/After Create/Save/Update/Delete/Find) -* Preloading (eager loading) -* Transactions -* Composite Primary Key -* SQL Builder -* Auto Migrations -* Logger -* Extendable, write Plugins based on GORM callbacks -* Every feature comes with tests -* Developer Friendly - -## Getting Started - -* GORM Guides [https://gorm.io](https://gorm.io) - -## Contributing - -[You can help to deliver a better GORM, check out things you can do](https://gorm.io/contribute.html) - -## License - -© Jinzhu, 2013~time.Now - -Released under the [MIT License](https://github.com/jinzhu/gorm/blob/master/License) diff --git a/vendor/github.com/jinzhu/gorm/association.go b/vendor/github.com/jinzhu/gorm/association.go deleted file mode 100644 index a73344f..0000000 --- a/vendor/github.com/jinzhu/gorm/association.go +++ /dev/null @@ -1,377 +0,0 @@ -package gorm - -import ( - "errors" - "fmt" - "reflect" -) - -// Association Mode contains some helper methods to handle relationship things easily. -type Association struct { - Error error - scope *Scope - column string - field *Field -} - -// Find find out all related associations -func (association *Association) Find(value interface{}) *Association { - association.scope.related(value, association.column) - return association.setErr(association.scope.db.Error) -} - -// Append append new associations for many2many, has_many, replace current association for has_one, belongs_to -func (association *Association) Append(values ...interface{}) *Association { - if association.Error != nil { - return association - } - - if relationship := association.field.Relationship; relationship.Kind == "has_one" { - return association.Replace(values...) - } - return association.saveAssociations(values...) 
-} - -// Replace replace current associations with new one -func (association *Association) Replace(values ...interface{}) *Association { - if association.Error != nil { - return association - } - - var ( - relationship = association.field.Relationship - scope = association.scope - field = association.field.Field - newDB = scope.NewDB() - ) - - // Append new values - association.field.Set(reflect.Zero(association.field.Field.Type())) - association.saveAssociations(values...) - - // Belongs To - if relationship.Kind == "belongs_to" { - // Set foreign key to be null when clearing value (length equals 0) - if len(values) == 0 { - // Set foreign key to be nil - var foreignKeyMap = map[string]interface{}{} - for _, foreignKey := range relationship.ForeignDBNames { - foreignKeyMap[foreignKey] = nil - } - association.setErr(newDB.Model(scope.Value).UpdateColumn(foreignKeyMap).Error) - } - } else { - // Polymorphic Relations - if relationship.PolymorphicDBName != "" { - newDB = newDB.Where(fmt.Sprintf("%v = ?", scope.Quote(relationship.PolymorphicDBName)), relationship.PolymorphicValue) - } - - // Delete Relations except new created - if len(values) > 0 { - var associationForeignFieldNames, associationForeignDBNames []string - if relationship.Kind == "many_to_many" { - // if many to many relations, get association fields name from association foreign keys - associationScope := scope.New(reflect.New(field.Type()).Interface()) - for idx, dbName := range relationship.AssociationForeignFieldNames { - if field, ok := associationScope.FieldByName(dbName); ok { - associationForeignFieldNames = append(associationForeignFieldNames, field.Name) - associationForeignDBNames = append(associationForeignDBNames, relationship.AssociationForeignDBNames[idx]) - } - } - } else { - // If has one/many relations, use primary keys - for _, field := range scope.New(reflect.New(field.Type()).Interface()).PrimaryFields() { - associationForeignFieldNames = append(associationForeignFieldNames, field.Name) - associationForeignDBNames = append(associationForeignDBNames, field.DBName) - } - } - - newPrimaryKeys := scope.getColumnAsArray(associationForeignFieldNames, field.Interface()) - - if len(newPrimaryKeys) > 0 { - sql := fmt.Sprintf("%v NOT IN (%v)", toQueryCondition(scope, associationForeignDBNames), toQueryMarks(newPrimaryKeys)) - newDB = newDB.Where(sql, toQueryValues(newPrimaryKeys)...) - } - } - - if relationship.Kind == "many_to_many" { - // if many to many relations, delete related relations from join table - var sourceForeignFieldNames []string - - for _, dbName := range relationship.ForeignFieldNames { - if field, ok := scope.FieldByName(dbName); ok { - sourceForeignFieldNames = append(sourceForeignFieldNames, field.Name) - } - } - - if sourcePrimaryKeys := scope.getColumnAsArray(sourceForeignFieldNames, scope.Value); len(sourcePrimaryKeys) > 0 { - newDB = newDB.Where(fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(sourcePrimaryKeys)), toQueryValues(sourcePrimaryKeys)...) - - association.setErr(relationship.JoinTableHandler.Delete(relationship.JoinTableHandler, newDB)) - } - } else if relationship.Kind == "has_one" || relationship.Kind == "has_many" { - // has_one or has_many relations, set foreign key to be nil (TODO or delete them?) 
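The Association helpers being removed here (Find, Append, Replace, Delete, Clear, Count) were reached through Model(...).Association(column) in the gorm v1 API. A hedged usage sketch; the User/Language schema and the sqlite database path are made up for illustration:

    package main

    import (
        "github.com/jinzhu/gorm"
        _ "github.com/jinzhu/gorm/dialects/sqlite"
    )

    type Language struct {
        gorm.Model
        Name string
    }

    type User struct {
        gorm.Model
        Name      string
        Languages []Language `gorm:"many2many:user_languages;"`
    }

    func main() {
        db, err := gorm.Open("sqlite3", "/tmp/assoc_example.db")
        if err != nil {
            panic(err)
        }
        defer db.Close()
        db.AutoMigrate(&User{}, &Language{})

        u := User{Name: "demo"}
        db.Create(&u)

        // Replace drops the current associations and saves the new ones,
        // following the belongs_to / many_to_many paths shown above.
        db.Model(&u).Association("Languages").Replace([]Language{{Name: "Go"}})
    }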
- var foreignKeyMap = map[string]interface{}{} - for idx, foreignKey := range relationship.ForeignDBNames { - foreignKeyMap[foreignKey] = nil - if field, ok := scope.FieldByName(relationship.AssociationForeignFieldNames[idx]); ok { - newDB = newDB.Where(fmt.Sprintf("%v = ?", scope.Quote(foreignKey)), field.Field.Interface()) - } - } - - fieldValue := reflect.New(association.field.Field.Type()).Interface() - association.setErr(newDB.Model(fieldValue).UpdateColumn(foreignKeyMap).Error) - } - } - return association -} - -// Delete remove relationship between source & passed arguments, but won't delete those arguments -func (association *Association) Delete(values ...interface{}) *Association { - if association.Error != nil { - return association - } - - var ( - relationship = association.field.Relationship - scope = association.scope - field = association.field.Field - newDB = scope.NewDB() - ) - - if len(values) == 0 { - return association - } - - var deletingResourcePrimaryFieldNames, deletingResourcePrimaryDBNames []string - for _, field := range scope.New(reflect.New(field.Type()).Interface()).PrimaryFields() { - deletingResourcePrimaryFieldNames = append(deletingResourcePrimaryFieldNames, field.Name) - deletingResourcePrimaryDBNames = append(deletingResourcePrimaryDBNames, field.DBName) - } - - deletingPrimaryKeys := scope.getColumnAsArray(deletingResourcePrimaryFieldNames, values...) - - if relationship.Kind == "many_to_many" { - // source value's foreign keys - for idx, foreignKey := range relationship.ForeignDBNames { - if field, ok := scope.FieldByName(relationship.ForeignFieldNames[idx]); ok { - newDB = newDB.Where(fmt.Sprintf("%v = ?", scope.Quote(foreignKey)), field.Field.Interface()) - } - } - - // get association's foreign fields name - var associationScope = scope.New(reflect.New(field.Type()).Interface()) - var associationForeignFieldNames []string - for _, associationDBName := range relationship.AssociationForeignFieldNames { - if field, ok := associationScope.FieldByName(associationDBName); ok { - associationForeignFieldNames = append(associationForeignFieldNames, field.Name) - } - } - - // association value's foreign keys - deletingPrimaryKeys := scope.getColumnAsArray(associationForeignFieldNames, values...) - sql := fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.AssociationForeignDBNames), toQueryMarks(deletingPrimaryKeys)) - newDB = newDB.Where(sql, toQueryValues(deletingPrimaryKeys)...) - - association.setErr(relationship.JoinTableHandler.Delete(relationship.JoinTableHandler, newDB)) - } else { - var foreignKeyMap = map[string]interface{}{} - for _, foreignKey := range relationship.ForeignDBNames { - foreignKeyMap[foreignKey] = nil - } - - if relationship.Kind == "belongs_to" { - // find with deleting relation's foreign keys - primaryKeys := scope.getColumnAsArray(relationship.AssociationForeignFieldNames, values...) 
- newDB = newDB.Where( - fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(primaryKeys)), - toQueryValues(primaryKeys)..., - ) - - // set foreign key to be null if there are some records affected - modelValue := reflect.New(scope.GetModelStruct().ModelType).Interface() - if results := newDB.Model(modelValue).UpdateColumn(foreignKeyMap); results.Error == nil { - if results.RowsAffected > 0 { - scope.updatedAttrsWithValues(foreignKeyMap) - } - } else { - association.setErr(results.Error) - } - } else if relationship.Kind == "has_one" || relationship.Kind == "has_many" { - // find all relations - primaryKeys := scope.getColumnAsArray(relationship.AssociationForeignFieldNames, scope.Value) - newDB = newDB.Where( - fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(primaryKeys)), - toQueryValues(primaryKeys)..., - ) - - // only include those deleting relations - newDB = newDB.Where( - fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, deletingResourcePrimaryDBNames), toQueryMarks(deletingPrimaryKeys)), - toQueryValues(deletingPrimaryKeys)..., - ) - - // set matched relation's foreign key to be null - fieldValue := reflect.New(association.field.Field.Type()).Interface() - association.setErr(newDB.Model(fieldValue).UpdateColumn(foreignKeyMap).Error) - } - } - - // Remove deleted records from source's field - if association.Error == nil { - if field.Kind() == reflect.Slice { - leftValues := reflect.Zero(field.Type()) - - for i := 0; i < field.Len(); i++ { - reflectValue := field.Index(i) - primaryKey := scope.getColumnAsArray(deletingResourcePrimaryFieldNames, reflectValue.Interface())[0] - var isDeleted = false - for _, pk := range deletingPrimaryKeys { - if equalAsString(primaryKey, pk) { - isDeleted = true - break - } - } - if !isDeleted { - leftValues = reflect.Append(leftValues, reflectValue) - } - } - - association.field.Set(leftValues) - } else if field.Kind() == reflect.Struct { - primaryKey := scope.getColumnAsArray(deletingResourcePrimaryFieldNames, field.Interface())[0] - for _, pk := range deletingPrimaryKeys { - if equalAsString(primaryKey, pk) { - association.field.Set(reflect.Zero(field.Type())) - break - } - } - } - } - - return association -} - -// Clear remove relationship between source & current associations, won't delete those associations -func (association *Association) Clear() *Association { - return association.Replace() -} - -// Count return the count of current associations -func (association *Association) Count() int { - var ( - count = 0 - relationship = association.field.Relationship - scope = association.scope - fieldValue = association.field.Field.Interface() - query = scope.DB() - ) - - switch relationship.Kind { - case "many_to_many": - query = relationship.JoinTableHandler.JoinWith(relationship.JoinTableHandler, query, scope.Value) - case "has_many", "has_one": - primaryKeys := scope.getColumnAsArray(relationship.AssociationForeignFieldNames, scope.Value) - query = query.Where( - fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(primaryKeys)), - toQueryValues(primaryKeys)..., - ) - case "belongs_to": - primaryKeys := scope.getColumnAsArray(relationship.ForeignFieldNames, scope.Value) - query = query.Where( - fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.AssociationForeignDBNames), toQueryMarks(primaryKeys)), - toQueryValues(primaryKeys)..., - ) - } - - if relationship.PolymorphicType != "" { - query = query.Where( - 
fmt.Sprintf("%v.%v = ?", scope.New(fieldValue).QuotedTableName(), scope.Quote(relationship.PolymorphicDBName)), - relationship.PolymorphicValue, - ) - } - - if err := query.Model(fieldValue).Count(&count).Error; err != nil { - association.Error = err - } - return count -} - -// saveAssociations save passed values as associations -func (association *Association) saveAssociations(values ...interface{}) *Association { - var ( - scope = association.scope - field = association.field - relationship = field.Relationship - ) - - saveAssociation := func(reflectValue reflect.Value) { - // value has to been pointer - if reflectValue.Kind() != reflect.Ptr { - reflectPtr := reflect.New(reflectValue.Type()) - reflectPtr.Elem().Set(reflectValue) - reflectValue = reflectPtr - } - - // value has to been saved for many2many - if relationship.Kind == "many_to_many" { - if scope.New(reflectValue.Interface()).PrimaryKeyZero() { - association.setErr(scope.NewDB().Save(reflectValue.Interface()).Error) - } - } - - // Assign Fields - var fieldType = field.Field.Type() - var setFieldBackToValue, setSliceFieldBackToValue bool - if reflectValue.Type().AssignableTo(fieldType) { - field.Set(reflectValue) - } else if reflectValue.Type().Elem().AssignableTo(fieldType) { - // if field's type is struct, then need to set value back to argument after save - setFieldBackToValue = true - field.Set(reflectValue.Elem()) - } else if fieldType.Kind() == reflect.Slice { - if reflectValue.Type().AssignableTo(fieldType.Elem()) { - field.Set(reflect.Append(field.Field, reflectValue)) - } else if reflectValue.Type().Elem().AssignableTo(fieldType.Elem()) { - // if field's type is slice of struct, then need to set value back to argument after save - setSliceFieldBackToValue = true - field.Set(reflect.Append(field.Field, reflectValue.Elem())) - } - } - - if relationship.Kind == "many_to_many" { - association.setErr(relationship.JoinTableHandler.Add(relationship.JoinTableHandler, scope.NewDB(), scope.Value, reflectValue.Interface())) - } else { - association.setErr(scope.NewDB().Select(field.Name).Save(scope.Value).Error) - - if setFieldBackToValue { - reflectValue.Elem().Set(field.Field) - } else if setSliceFieldBackToValue { - reflectValue.Elem().Set(field.Field.Index(field.Field.Len() - 1)) - } - } - } - - for _, value := range values { - reflectValue := reflect.ValueOf(value) - indirectReflectValue := reflect.Indirect(reflectValue) - if indirectReflectValue.Kind() == reflect.Struct { - saveAssociation(reflectValue) - } else if indirectReflectValue.Kind() == reflect.Slice { - for i := 0; i < indirectReflectValue.Len(); i++ { - saveAssociation(indirectReflectValue.Index(i)) - } - } else { - association.setErr(errors.New("invalid value type")) - } - } - return association -} - -// setErr set error when the error is not nil. And return Association. 
-func (association *Association) setErr(err error) *Association { - if err != nil { - association.Error = err - } - return association -} diff --git a/vendor/github.com/jinzhu/gorm/callback.go b/vendor/github.com/jinzhu/gorm/callback.go deleted file mode 100644 index 1f0e3c7..0000000 --- a/vendor/github.com/jinzhu/gorm/callback.go +++ /dev/null @@ -1,250 +0,0 @@ -package gorm - -import "fmt" - -// DefaultCallback default callbacks defined by gorm -var DefaultCallback = &Callback{logger: nopLogger{}} - -// Callback is a struct that contains all CRUD callbacks -// Field `creates` contains callbacks will be call when creating object -// Field `updates` contains callbacks will be call when updating object -// Field `deletes` contains callbacks will be call when deleting object -// Field `queries` contains callbacks will be call when querying object with query methods like Find, First, Related, Association... -// Field `rowQueries` contains callbacks will be call when querying object with Row, Rows... -// Field `processors` contains all callback processors, will be used to generate above callbacks in order -type Callback struct { - logger logger - creates []*func(scope *Scope) - updates []*func(scope *Scope) - deletes []*func(scope *Scope) - queries []*func(scope *Scope) - rowQueries []*func(scope *Scope) - processors []*CallbackProcessor -} - -// CallbackProcessor contains callback informations -type CallbackProcessor struct { - logger logger - name string // current callback's name - before string // register current callback before a callback - after string // register current callback after a callback - replace bool // replace callbacks with same name - remove bool // delete callbacks with same name - kind string // callback type: create, update, delete, query, row_query - processor *func(scope *Scope) // callback handler - parent *Callback -} - -func (c *Callback) clone(logger logger) *Callback { - return &Callback{ - logger: logger, - creates: c.creates, - updates: c.updates, - deletes: c.deletes, - queries: c.queries, - rowQueries: c.rowQueries, - processors: c.processors, - } -} - -// Create could be used to register callbacks for creating object -// db.Callback().Create().After("gorm:create").Register("plugin:run_after_create", func(*Scope) { -// // business logic -// ... -// -// // set error if some thing wrong happened, will rollback the creating -// scope.Err(errors.New("error")) -// }) -func (c *Callback) Create() *CallbackProcessor { - return &CallbackProcessor{logger: c.logger, kind: "create", parent: c} -} - -// Update could be used to register callbacks for updating object, refer `Create` for usage -func (c *Callback) Update() *CallbackProcessor { - return &CallbackProcessor{logger: c.logger, kind: "update", parent: c} -} - -// Delete could be used to register callbacks for deleting object, refer `Create` for usage -func (c *Callback) Delete() *CallbackProcessor { - return &CallbackProcessor{logger: c.logger, kind: "delete", parent: c} -} - -// Query could be used to register callbacks for querying objects with query methods like `Find`, `First`, `Related`, `Association`... 
-// Refer `Create` for usage -func (c *Callback) Query() *CallbackProcessor { - return &CallbackProcessor{logger: c.logger, kind: "query", parent: c} -} - -// RowQuery could be used to register callbacks for querying objects with `Row`, `Rows`, refer `Create` for usage -func (c *Callback) RowQuery() *CallbackProcessor { - return &CallbackProcessor{logger: c.logger, kind: "row_query", parent: c} -} - -// After insert a new callback after callback `callbackName`, refer `Callbacks.Create` -func (cp *CallbackProcessor) After(callbackName string) *CallbackProcessor { - cp.after = callbackName - return cp -} - -// Before insert a new callback before callback `callbackName`, refer `Callbacks.Create` -func (cp *CallbackProcessor) Before(callbackName string) *CallbackProcessor { - cp.before = callbackName - return cp -} - -// Register a new callback, refer `Callbacks.Create` -func (cp *CallbackProcessor) Register(callbackName string, callback func(scope *Scope)) { - if cp.kind == "row_query" { - if cp.before == "" && cp.after == "" && callbackName != "gorm:row_query" { - cp.logger.Print("info", fmt.Sprintf("Registering RowQuery callback %v without specify order with Before(), After(), applying Before('gorm:row_query') by default for compatibility...", callbackName)) - cp.before = "gorm:row_query" - } - } - - cp.logger.Print("info", fmt.Sprintf("[info] registering callback `%v` from %v", callbackName, fileWithLineNum())) - cp.name = callbackName - cp.processor = &callback - cp.parent.processors = append(cp.parent.processors, cp) - cp.parent.reorder() -} - -// Remove a registered callback -// db.Callback().Create().Remove("gorm:update_time_stamp_when_create") -func (cp *CallbackProcessor) Remove(callbackName string) { - cp.logger.Print("info", fmt.Sprintf("[info] removing callback `%v` from %v", callbackName, fileWithLineNum())) - cp.name = callbackName - cp.remove = true - cp.parent.processors = append(cp.parent.processors, cp) - cp.parent.reorder() -} - -// Replace a registered callback with new callback -// db.Callback().Create().Replace("gorm:update_time_stamp_when_create", func(*Scope) { -// scope.SetColumn("CreatedAt", now) -// scope.SetColumn("UpdatedAt", now) -// }) -func (cp *CallbackProcessor) Replace(callbackName string, callback func(scope *Scope)) { - cp.logger.Print("info", fmt.Sprintf("[info] replacing callback `%v` from %v", callbackName, fileWithLineNum())) - cp.name = callbackName - cp.processor = &callback - cp.replace = true - cp.parent.processors = append(cp.parent.processors, cp) - cp.parent.reorder() -} - -// Get registered callback -// db.Callback().Create().Get("gorm:create") -func (cp *CallbackProcessor) Get(callbackName string) (callback func(scope *Scope)) { - for _, p := range cp.parent.processors { - if p.name == callbackName && p.kind == cp.kind { - if p.remove { - callback = nil - } else { - callback = *p.processor - } - } - } - return -} - -// getRIndex get right index from string slice -func getRIndex(strs []string, str string) int { - for i := len(strs) - 1; i >= 0; i-- { - if strs[i] == str { - return i - } - } - return -1 -} - -// sortProcessors sort callback processors based on its before, after, remove, replace -func sortProcessors(cps []*CallbackProcessor) []*func(scope *Scope) { - var ( - allNames, sortedNames []string - sortCallbackProcessor func(c *CallbackProcessor) - ) - - for _, cp := range cps { - // show warning message the callback name already exists - if index := getRIndex(allNames, cp.name); index > -1 && !cp.replace && !cp.remove { - 
cp.logger.Print("warning", fmt.Sprintf("[warning] duplicated callback `%v` from %v", cp.name, fileWithLineNum())) - } - allNames = append(allNames, cp.name) - } - - sortCallbackProcessor = func(c *CallbackProcessor) { - if getRIndex(sortedNames, c.name) == -1 { // if not sorted - if c.before != "" { // if defined before callback - if index := getRIndex(sortedNames, c.before); index != -1 { - // if before callback already sorted, append current callback just after it - sortedNames = append(sortedNames[:index], append([]string{c.name}, sortedNames[index:]...)...) - } else if index := getRIndex(allNames, c.before); index != -1 { - // if before callback exists but haven't sorted, append current callback to last - sortedNames = append(sortedNames, c.name) - sortCallbackProcessor(cps[index]) - } - } - - if c.after != "" { // if defined after callback - if index := getRIndex(sortedNames, c.after); index != -1 { - // if after callback already sorted, append current callback just before it - sortedNames = append(sortedNames[:index+1], append([]string{c.name}, sortedNames[index+1:]...)...) - } else if index := getRIndex(allNames, c.after); index != -1 { - // if after callback exists but haven't sorted - cp := cps[index] - // set after callback's before callback to current callback - if cp.before == "" { - cp.before = c.name - } - sortCallbackProcessor(cp) - } - } - - // if current callback haven't been sorted, append it to last - if getRIndex(sortedNames, c.name) == -1 { - sortedNames = append(sortedNames, c.name) - } - } - } - - for _, cp := range cps { - sortCallbackProcessor(cp) - } - - var sortedFuncs []*func(scope *Scope) - for _, name := range sortedNames { - if index := getRIndex(allNames, name); !cps[index].remove { - sortedFuncs = append(sortedFuncs, cps[index].processor) - } - } - - return sortedFuncs -} - -// reorder all registered processors, and reset CRUD callbacks -func (c *Callback) reorder() { - var creates, updates, deletes, queries, rowQueries []*CallbackProcessor - - for _, processor := range c.processors { - if processor.name != "" { - switch processor.kind { - case "create": - creates = append(creates, processor) - case "update": - updates = append(updates, processor) - case "delete": - deletes = append(deletes, processor) - case "query": - queries = append(queries, processor) - case "row_query": - rowQueries = append(rowQueries, processor) - } - } - } - - c.creates = sortProcessors(creates) - c.updates = sortProcessors(updates) - c.deletes = sortProcessors(deletes) - c.queries = sortProcessors(queries) - c.rowQueries = sortProcessors(rowQueries) -} diff --git a/vendor/github.com/jinzhu/gorm/callback_create.go b/vendor/github.com/jinzhu/gorm/callback_create.go deleted file mode 100644 index c4d25f3..0000000 --- a/vendor/github.com/jinzhu/gorm/callback_create.go +++ /dev/null @@ -1,197 +0,0 @@ -package gorm - -import ( - "fmt" - "strings" -) - -// Define callbacks for creating -func init() { - DefaultCallback.Create().Register("gorm:begin_transaction", beginTransactionCallback) - DefaultCallback.Create().Register("gorm:before_create", beforeCreateCallback) - DefaultCallback.Create().Register("gorm:save_before_associations", saveBeforeAssociationsCallback) - DefaultCallback.Create().Register("gorm:update_time_stamp", updateTimeStampForCreateCallback) - DefaultCallback.Create().Register("gorm:create", createCallback) - DefaultCallback.Create().Register("gorm:force_reload_after_create", forceReloadAfterCreateCallback) - 
DefaultCallback.Create().Register("gorm:save_after_associations", saveAfterAssociationsCallback) - DefaultCallback.Create().Register("gorm:after_create", afterCreateCallback) - DefaultCallback.Create().Register("gorm:commit_or_rollback_transaction", commitOrRollbackTransactionCallback) -} - -// beforeCreateCallback will invoke `BeforeSave`, `BeforeCreate` method before creating -func beforeCreateCallback(scope *Scope) { - if !scope.HasError() { - scope.CallMethod("BeforeSave") - } - if !scope.HasError() { - scope.CallMethod("BeforeCreate") - } -} - -// updateTimeStampForCreateCallback will set `CreatedAt`, `UpdatedAt` when creating -func updateTimeStampForCreateCallback(scope *Scope) { - if !scope.HasError() { - now := scope.db.nowFunc() - - if createdAtField, ok := scope.FieldByName("CreatedAt"); ok { - if createdAtField.IsBlank { - createdAtField.Set(now) - } - } - - if updatedAtField, ok := scope.FieldByName("UpdatedAt"); ok { - if updatedAtField.IsBlank { - updatedAtField.Set(now) - } - } - } -} - -// createCallback the callback used to insert data into database -func createCallback(scope *Scope) { - if !scope.HasError() { - defer scope.trace(NowFunc()) - - var ( - columns, placeholders []string - blankColumnsWithDefaultValue []string - ) - - for _, field := range scope.Fields() { - if scope.changeableField(field) { - if field.IsNormal && !field.IsIgnored { - if field.IsBlank && field.HasDefaultValue { - blankColumnsWithDefaultValue = append(blankColumnsWithDefaultValue, scope.Quote(field.DBName)) - scope.InstanceSet("gorm:blank_columns_with_default_value", blankColumnsWithDefaultValue) - } else if !field.IsPrimaryKey || !field.IsBlank { - columns = append(columns, scope.Quote(field.DBName)) - placeholders = append(placeholders, scope.AddToVars(field.Field.Interface())) - } - } else if field.Relationship != nil && field.Relationship.Kind == "belongs_to" { - for _, foreignKey := range field.Relationship.ForeignDBNames { - if foreignField, ok := scope.FieldByName(foreignKey); ok && !scope.changeableField(foreignField) { - columns = append(columns, scope.Quote(foreignField.DBName)) - placeholders = append(placeholders, scope.AddToVars(foreignField.Field.Interface())) - } - } - } - } - } - - var ( - returningColumn = "*" - quotedTableName = scope.QuotedTableName() - primaryField = scope.PrimaryField() - extraOption string - insertModifier string - ) - - if str, ok := scope.Get("gorm:insert_option"); ok { - extraOption = fmt.Sprint(str) - } - if str, ok := scope.Get("gorm:insert_modifier"); ok { - insertModifier = strings.ToUpper(fmt.Sprint(str)) - if insertModifier == "INTO" { - insertModifier = "" - } - } - - if primaryField != nil { - returningColumn = scope.Quote(primaryField.DBName) - } - - lastInsertIDOutputInterstitial := scope.Dialect().LastInsertIDOutputInterstitial(quotedTableName, returningColumn, columns) - var lastInsertIDReturningSuffix string - if lastInsertIDOutputInterstitial == "" { - lastInsertIDReturningSuffix = scope.Dialect().LastInsertIDReturningSuffix(quotedTableName, returningColumn) - } - - if len(columns) == 0 { - scope.Raw(fmt.Sprintf( - "INSERT%v INTO %v %v%v%v", - addExtraSpaceIfExist(insertModifier), - quotedTableName, - scope.Dialect().DefaultValueStr(), - addExtraSpaceIfExist(extraOption), - addExtraSpaceIfExist(lastInsertIDReturningSuffix), - )) - } else { - scope.Raw(fmt.Sprintf( - "INSERT%v INTO %v (%v)%v VALUES (%v)%v%v", - addExtraSpaceIfExist(insertModifier), - scope.QuotedTableName(), - strings.Join(columns, ","), - 
addExtraSpaceIfExist(lastInsertIDOutputInterstitial), - strings.Join(placeholders, ","), - addExtraSpaceIfExist(extraOption), - addExtraSpaceIfExist(lastInsertIDReturningSuffix), - )) - } - - // execute create sql: no primaryField - if primaryField == nil { - if result, err := scope.SQLDB().Exec(scope.SQL, scope.SQLVars...); scope.Err(err) == nil { - // set rows affected count - scope.db.RowsAffected, _ = result.RowsAffected() - - // set primary value to primary field - if primaryField != nil && primaryField.IsBlank { - if primaryValue, err := result.LastInsertId(); scope.Err(err) == nil { - scope.Err(primaryField.Set(primaryValue)) - } - } - } - return - } - - // execute create sql: lastInsertID implemention for majority of dialects - if lastInsertIDReturningSuffix == "" && lastInsertIDOutputInterstitial == "" { - if result, err := scope.SQLDB().Exec(scope.SQL, scope.SQLVars...); scope.Err(err) == nil { - // set rows affected count - scope.db.RowsAffected, _ = result.RowsAffected() - - // set primary value to primary field - if primaryField != nil && primaryField.IsBlank { - if primaryValue, err := result.LastInsertId(); scope.Err(err) == nil { - scope.Err(primaryField.Set(primaryValue)) - } - } - } - return - } - - // execute create sql: dialects with additional lastInsertID requirements (currently postgres & mssql) - if primaryField.Field.CanAddr() { - if err := scope.SQLDB().QueryRow(scope.SQL, scope.SQLVars...).Scan(primaryField.Field.Addr().Interface()); scope.Err(err) == nil { - primaryField.IsBlank = false - scope.db.RowsAffected = 1 - } - } else { - scope.Err(ErrUnaddressable) - } - return - } -} - -// forceReloadAfterCreateCallback will reload columns that having default value, and set it back to current object -func forceReloadAfterCreateCallback(scope *Scope) { - if blankColumnsWithDefaultValue, ok := scope.InstanceGet("gorm:blank_columns_with_default_value"); ok { - db := scope.DB().New().Table(scope.TableName()).Select(blankColumnsWithDefaultValue.([]string)) - for _, field := range scope.Fields() { - if field.IsPrimaryKey && !field.IsBlank { - db = db.Where(fmt.Sprintf("%v = ?", field.DBName), field.Field.Interface()) - } - } - db.Scan(scope.Value) - } -} - -// afterCreateCallback will invoke `AfterCreate`, `AfterSave` method after creating -func afterCreateCallback(scope *Scope) { - if !scope.HasError() { - scope.CallMethod("AfterCreate") - } - if !scope.HasError() { - scope.CallMethod("AfterSave") - } -} diff --git a/vendor/github.com/jinzhu/gorm/callback_delete.go b/vendor/github.com/jinzhu/gorm/callback_delete.go deleted file mode 100644 index 48b97ac..0000000 --- a/vendor/github.com/jinzhu/gorm/callback_delete.go +++ /dev/null @@ -1,63 +0,0 @@ -package gorm - -import ( - "errors" - "fmt" -) - -// Define callbacks for deleting -func init() { - DefaultCallback.Delete().Register("gorm:begin_transaction", beginTransactionCallback) - DefaultCallback.Delete().Register("gorm:before_delete", beforeDeleteCallback) - DefaultCallback.Delete().Register("gorm:delete", deleteCallback) - DefaultCallback.Delete().Register("gorm:after_delete", afterDeleteCallback) - DefaultCallback.Delete().Register("gorm:commit_or_rollback_transaction", commitOrRollbackTransactionCallback) -} - -// beforeDeleteCallback will invoke `BeforeDelete` method before deleting -func beforeDeleteCallback(scope *Scope) { - if scope.DB().HasBlockGlobalUpdate() && !scope.hasConditions() { - scope.Err(errors.New("missing WHERE clause while deleting")) - return - } - if !scope.HasError() { - 
scope.CallMethod("BeforeDelete") - } -} - -// deleteCallback used to delete data from database or set deleted_at to current time (when using with soft delete) -func deleteCallback(scope *Scope) { - if !scope.HasError() { - var extraOption string - if str, ok := scope.Get("gorm:delete_option"); ok { - extraOption = fmt.Sprint(str) - } - - deletedAtField, hasDeletedAtField := scope.FieldByName("DeletedAt") - - if !scope.Search.Unscoped && hasDeletedAtField { - scope.Raw(fmt.Sprintf( - "UPDATE %v SET %v=%v%v%v", - scope.QuotedTableName(), - scope.Quote(deletedAtField.DBName), - scope.AddToVars(scope.db.nowFunc()), - addExtraSpaceIfExist(scope.CombinedConditionSql()), - addExtraSpaceIfExist(extraOption), - )).Exec() - } else { - scope.Raw(fmt.Sprintf( - "DELETE FROM %v%v%v", - scope.QuotedTableName(), - addExtraSpaceIfExist(scope.CombinedConditionSql()), - addExtraSpaceIfExist(extraOption), - )).Exec() - } - } -} - -// afterDeleteCallback will invoke `AfterDelete` method after deleting -func afterDeleteCallback(scope *Scope) { - if !scope.HasError() { - scope.CallMethod("AfterDelete") - } -} diff --git a/vendor/github.com/jinzhu/gorm/callback_query.go b/vendor/github.com/jinzhu/gorm/callback_query.go deleted file mode 100644 index 544afd6..0000000 --- a/vendor/github.com/jinzhu/gorm/callback_query.go +++ /dev/null @@ -1,109 +0,0 @@ -package gorm - -import ( - "errors" - "fmt" - "reflect" -) - -// Define callbacks for querying -func init() { - DefaultCallback.Query().Register("gorm:query", queryCallback) - DefaultCallback.Query().Register("gorm:preload", preloadCallback) - DefaultCallback.Query().Register("gorm:after_query", afterQueryCallback) -} - -// queryCallback used to query data from database -func queryCallback(scope *Scope) { - if _, skip := scope.InstanceGet("gorm:skip_query_callback"); skip { - return - } - - //we are only preloading relations, dont touch base model - if _, skip := scope.InstanceGet("gorm:only_preload"); skip { - return - } - - defer scope.trace(NowFunc()) - - var ( - isSlice, isPtr bool - resultType reflect.Type - results = scope.IndirectValue() - ) - - if orderBy, ok := scope.Get("gorm:order_by_primary_key"); ok { - if primaryField := scope.PrimaryField(); primaryField != nil { - scope.Search.Order(fmt.Sprintf("%v.%v %v", scope.QuotedTableName(), scope.Quote(primaryField.DBName), orderBy)) - } - } - - if value, ok := scope.Get("gorm:query_destination"); ok { - results = indirect(reflect.ValueOf(value)) - } - - if kind := results.Kind(); kind == reflect.Slice { - isSlice = true - resultType = results.Type().Elem() - results.Set(reflect.MakeSlice(results.Type(), 0, 0)) - - if resultType.Kind() == reflect.Ptr { - isPtr = true - resultType = resultType.Elem() - } - } else if kind != reflect.Struct { - scope.Err(errors.New("unsupported destination, should be slice or struct")) - return - } - - scope.prepareQuerySQL() - - if !scope.HasError() { - scope.db.RowsAffected = 0 - - if str, ok := scope.Get("gorm:query_hint"); ok { - scope.SQL = fmt.Sprint(str) + scope.SQL - } - - if str, ok := scope.Get("gorm:query_option"); ok { - scope.SQL += addExtraSpaceIfExist(fmt.Sprint(str)) - } - - if rows, err := scope.SQLDB().Query(scope.SQL, scope.SQLVars...); scope.Err(err) == nil { - defer rows.Close() - - columns, _ := rows.Columns() - for rows.Next() { - scope.db.RowsAffected++ - - elem := results - if isSlice { - elem = reflect.New(resultType).Elem() - } - - scope.scan(rows, columns, scope.New(elem.Addr().Interface()).Fields()) - - if isSlice { - if isPtr { - 
results.Set(reflect.Append(results, elem.Addr())) - } else { - results.Set(reflect.Append(results, elem)) - } - } - } - - if err := rows.Err(); err != nil { - scope.Err(err) - } else if scope.db.RowsAffected == 0 && !isSlice { - scope.Err(ErrRecordNotFound) - } - } - } -} - -// afterQueryCallback will invoke `AfterFind` method after querying -func afterQueryCallback(scope *Scope) { - if !scope.HasError() { - scope.CallMethod("AfterFind") - } -} diff --git a/vendor/github.com/jinzhu/gorm/callback_query_preload.go b/vendor/github.com/jinzhu/gorm/callback_query_preload.go deleted file mode 100644 index a936180..0000000 --- a/vendor/github.com/jinzhu/gorm/callback_query_preload.go +++ /dev/null @@ -1,410 +0,0 @@ -package gorm - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "strings" -) - -// preloadCallback used to preload associations -func preloadCallback(scope *Scope) { - if _, skip := scope.InstanceGet("gorm:skip_query_callback"); skip { - return - } - - if ap, ok := scope.Get("gorm:auto_preload"); ok { - // If gorm:auto_preload IS NOT a bool then auto preload. - // Else if it IS a bool, use the value - if apb, ok := ap.(bool); !ok { - autoPreload(scope) - } else if apb { - autoPreload(scope) - } - } - - if scope.Search.preload == nil || scope.HasError() { - return - } - - var ( - preloadedMap = map[string]bool{} - fields = scope.Fields() - ) - - for _, preload := range scope.Search.preload { - var ( - preloadFields = strings.Split(preload.schema, ".") - currentScope = scope - currentFields = fields - ) - - for idx, preloadField := range preloadFields { - var currentPreloadConditions []interface{} - - if currentScope == nil { - continue - } - - // if not preloaded - if preloadKey := strings.Join(preloadFields[:idx+1], "."); !preloadedMap[preloadKey] { - - // assign search conditions to last preload - if idx == len(preloadFields)-1 { - currentPreloadConditions = preload.conditions - } - - for _, field := range currentFields { - if field.Name != preloadField || field.Relationship == nil { - continue - } - - switch field.Relationship.Kind { - case "has_one": - currentScope.handleHasOnePreload(field, currentPreloadConditions) - case "has_many": - currentScope.handleHasManyPreload(field, currentPreloadConditions) - case "belongs_to": - currentScope.handleBelongsToPreload(field, currentPreloadConditions) - case "many_to_many": - currentScope.handleManyToManyPreload(field, currentPreloadConditions) - default: - scope.Err(errors.New("unsupported relation")) - } - - preloadedMap[preloadKey] = true - break - } - - if !preloadedMap[preloadKey] { - scope.Err(fmt.Errorf("can't preload field %s for %s", preloadField, currentScope.GetModelStruct().ModelType)) - return - } - } - - // preload next level - if idx < len(preloadFields)-1 { - currentScope = currentScope.getColumnAsScope(preloadField) - if currentScope != nil { - currentFields = currentScope.Fields() - } - } - } - } -} - -func autoPreload(scope *Scope) { - for _, field := range scope.Fields() { - if field.Relationship == nil { - continue - } - - if val, ok := field.TagSettingsGet("PRELOAD"); ok { - if preload, err := strconv.ParseBool(val); err != nil { - scope.Err(errors.New("invalid preload option")) - return - } else if !preload { - continue - } - } - - scope.Search.Preload(field.Name) - } -} - -func (scope *Scope) generatePreloadDBWithConditions(conditions []interface{}) (*DB, []interface{}) { - var ( - preloadDB = scope.NewDB() - preloadConditions []interface{} - ) - - for _, condition := range conditions { - if scopes, ok := 
condition.(func(*DB) *DB); ok { - preloadDB = scopes(preloadDB) - } else { - preloadConditions = append(preloadConditions, condition) - } - } - - return preloadDB, preloadConditions -} - -// handleHasOnePreload used to preload has one associations -func (scope *Scope) handleHasOnePreload(field *Field, conditions []interface{}) { - relation := field.Relationship - - // get relations's primary keys - primaryKeys := scope.getColumnAsArray(relation.AssociationForeignFieldNames, scope.Value) - if len(primaryKeys) == 0 { - return - } - - // preload conditions - preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions) - - // find relations - query := fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.ForeignDBNames), toQueryMarks(primaryKeys)) - values := toQueryValues(primaryKeys) - if relation.PolymorphicType != "" { - query += fmt.Sprintf(" AND %v = ?", scope.Quote(relation.PolymorphicDBName)) - values = append(values, relation.PolymorphicValue) - } - - results := makeSlice(field.Struct.Type) - scope.Err(preloadDB.Where(query, values...).Find(results, preloadConditions...).Error) - - // assign find results - var ( - resultsValue = indirect(reflect.ValueOf(results)) - indirectScopeValue = scope.IndirectValue() - ) - - if indirectScopeValue.Kind() == reflect.Slice { - foreignValuesToResults := make(map[string]reflect.Value) - for i := 0; i < resultsValue.Len(); i++ { - result := resultsValue.Index(i) - foreignValues := toString(getValueFromFields(result, relation.ForeignFieldNames)) - foreignValuesToResults[foreignValues] = result - } - for j := 0; j < indirectScopeValue.Len(); j++ { - indirectValue := indirect(indirectScopeValue.Index(j)) - valueString := toString(getValueFromFields(indirectValue, relation.AssociationForeignFieldNames)) - if result, found := foreignValuesToResults[valueString]; found { - indirectValue.FieldByName(field.Name).Set(result) - } - } - } else { - for i := 0; i < resultsValue.Len(); i++ { - result := resultsValue.Index(i) - scope.Err(field.Set(result)) - } - } -} - -// handleHasManyPreload used to preload has many associations -func (scope *Scope) handleHasManyPreload(field *Field, conditions []interface{}) { - relation := field.Relationship - - // get relations's primary keys - primaryKeys := scope.getColumnAsArray(relation.AssociationForeignFieldNames, scope.Value) - if len(primaryKeys) == 0 { - return - } - - // preload conditions - preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions) - - // find relations - query := fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.ForeignDBNames), toQueryMarks(primaryKeys)) - values := toQueryValues(primaryKeys) - if relation.PolymorphicType != "" { - query += fmt.Sprintf(" AND %v = ?", scope.Quote(relation.PolymorphicDBName)) - values = append(values, relation.PolymorphicValue) - } - - results := makeSlice(field.Struct.Type) - scope.Err(preloadDB.Where(query, values...).Find(results, preloadConditions...).Error) - - // assign find results - var ( - resultsValue = indirect(reflect.ValueOf(results)) - indirectScopeValue = scope.IndirectValue() - ) - - if indirectScopeValue.Kind() == reflect.Slice { - preloadMap := make(map[string][]reflect.Value) - for i := 0; i < resultsValue.Len(); i++ { - result := resultsValue.Index(i) - foreignValues := getValueFromFields(result, relation.ForeignFieldNames) - preloadMap[toString(foreignValues)] = append(preloadMap[toString(foreignValues)], result) - } - - for j := 0; j < indirectScopeValue.Len(); j++ { - object := 
indirect(indirectScopeValue.Index(j)) - objectRealValue := getValueFromFields(object, relation.AssociationForeignFieldNames) - f := object.FieldByName(field.Name) - if results, ok := preloadMap[toString(objectRealValue)]; ok { - f.Set(reflect.Append(f, results...)) - } else { - f.Set(reflect.MakeSlice(f.Type(), 0, 0)) - } - } - } else { - scope.Err(field.Set(resultsValue)) - } -} - -// handleBelongsToPreload used to preload belongs to associations -func (scope *Scope) handleBelongsToPreload(field *Field, conditions []interface{}) { - relation := field.Relationship - - // preload conditions - preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions) - - // get relations's primary keys - primaryKeys := scope.getColumnAsArray(relation.ForeignFieldNames, scope.Value) - if len(primaryKeys) == 0 { - return - } - - // find relations - results := makeSlice(field.Struct.Type) - scope.Err(preloadDB.Where(fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.AssociationForeignDBNames), toQueryMarks(primaryKeys)), toQueryValues(primaryKeys)...).Find(results, preloadConditions...).Error) - - // assign find results - var ( - resultsValue = indirect(reflect.ValueOf(results)) - indirectScopeValue = scope.IndirectValue() - ) - - foreignFieldToObjects := make(map[string][]*reflect.Value) - if indirectScopeValue.Kind() == reflect.Slice { - for j := 0; j < indirectScopeValue.Len(); j++ { - object := indirect(indirectScopeValue.Index(j)) - valueString := toString(getValueFromFields(object, relation.ForeignFieldNames)) - foreignFieldToObjects[valueString] = append(foreignFieldToObjects[valueString], &object) - } - } - - for i := 0; i < resultsValue.Len(); i++ { - result := resultsValue.Index(i) - if indirectScopeValue.Kind() == reflect.Slice { - valueString := toString(getValueFromFields(result, relation.AssociationForeignFieldNames)) - if objects, found := foreignFieldToObjects[valueString]; found { - for _, object := range objects { - object.FieldByName(field.Name).Set(result) - } - } - } else { - scope.Err(field.Set(result)) - } - } -} - -// handleManyToManyPreload used to preload many to many associations -func (scope *Scope) handleManyToManyPreload(field *Field, conditions []interface{}) { - var ( - relation = field.Relationship - joinTableHandler = relation.JoinTableHandler - fieldType = field.Struct.Type.Elem() - foreignKeyValue interface{} - foreignKeyType = reflect.ValueOf(&foreignKeyValue).Type() - linkHash = map[string][]reflect.Value{} - isPtr bool - ) - - if fieldType.Kind() == reflect.Ptr { - isPtr = true - fieldType = fieldType.Elem() - } - - var sourceKeys = []string{} - for _, key := range joinTableHandler.SourceForeignKeys() { - sourceKeys = append(sourceKeys, key.DBName) - } - - // preload conditions - preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions) - - // generate query with join table - newScope := scope.New(reflect.New(fieldType).Interface()) - preloadDB = preloadDB.Table(newScope.TableName()).Model(newScope.Value) - - if len(preloadDB.search.selects) == 0 { - preloadDB = preloadDB.Select("*") - } - - preloadDB = joinTableHandler.JoinWith(joinTableHandler, preloadDB, scope.Value) - - // preload inline conditions - if len(preloadConditions) > 0 { - preloadDB = preloadDB.Where(preloadConditions[0], preloadConditions[1:]...) 
- } - - rows, err := preloadDB.Rows() - - if scope.Err(err) != nil { - return - } - defer rows.Close() - - columns, _ := rows.Columns() - for rows.Next() { - var ( - elem = reflect.New(fieldType).Elem() - fields = scope.New(elem.Addr().Interface()).Fields() - ) - - // register foreign keys in join tables - var joinTableFields []*Field - for _, sourceKey := range sourceKeys { - joinTableFields = append(joinTableFields, &Field{StructField: &StructField{DBName: sourceKey, IsNormal: true}, Field: reflect.New(foreignKeyType).Elem()}) - } - - scope.scan(rows, columns, append(fields, joinTableFields...)) - - scope.New(elem.Addr().Interface()). - InstanceSet("gorm:skip_query_callback", true). - callCallbacks(scope.db.parent.callbacks.queries) - - var foreignKeys = make([]interface{}, len(sourceKeys)) - // generate hashed forkey keys in join table - for idx, joinTableField := range joinTableFields { - if !joinTableField.Field.IsNil() { - foreignKeys[idx] = joinTableField.Field.Elem().Interface() - } - } - hashedSourceKeys := toString(foreignKeys) - - if isPtr { - linkHash[hashedSourceKeys] = append(linkHash[hashedSourceKeys], elem.Addr()) - } else { - linkHash[hashedSourceKeys] = append(linkHash[hashedSourceKeys], elem) - } - } - - if err := rows.Err(); err != nil { - scope.Err(err) - } - - // assign find results - var ( - indirectScopeValue = scope.IndirectValue() - fieldsSourceMap = map[string][]reflect.Value{} - foreignFieldNames = []string{} - ) - - for _, dbName := range relation.ForeignFieldNames { - if field, ok := scope.FieldByName(dbName); ok { - foreignFieldNames = append(foreignFieldNames, field.Name) - } - } - - if indirectScopeValue.Kind() == reflect.Slice { - for j := 0; j < indirectScopeValue.Len(); j++ { - object := indirect(indirectScopeValue.Index(j)) - key := toString(getValueFromFields(object, foreignFieldNames)) - fieldsSourceMap[key] = append(fieldsSourceMap[key], object.FieldByName(field.Name)) - } - } else if indirectScopeValue.IsValid() { - key := toString(getValueFromFields(indirectScopeValue, foreignFieldNames)) - fieldsSourceMap[key] = append(fieldsSourceMap[key], indirectScopeValue.FieldByName(field.Name)) - } - - for source, fields := range fieldsSourceMap { - for _, f := range fields { - //If not 0 this means Value is a pointer and we already added preloaded models to it - if f.Len() != 0 { - continue - } - - v := reflect.MakeSlice(f.Type(), 0, 0) - if len(linkHash[source]) > 0 { - v = reflect.Append(f, linkHash[source]...) 
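All four preload handlers above share one shape: collect keys from the source rows, issue a single IN query, group the results by foreign key, then hand each source object its group. A schematic sketch of the grouping step, with types reduced to strings:

    package main

    import "fmt"

    func main() {
        // rows returned by one "WHERE fk IN (...)" query
        type row struct{ FK, Val string }
        results := []row{{"1", "a"}, {"1", "b"}, {"2", "c"}}

        // group once by foreign key, mirroring preloadMap / linkHash above
        byFK := map[string][]string{}
        for _, r := range results {
            byFK[r.FK] = append(byFK[r.FK], r.Val)
        }

        // each source object then takes its own group
        fmt.Println(byFK["1"], byFK["2"]) // [a b] [c]
    }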
- } - - f.Set(v) - } - } -} diff --git a/vendor/github.com/jinzhu/gorm/callback_row_query.go b/vendor/github.com/jinzhu/gorm/callback_row_query.go deleted file mode 100644 index 323b160..0000000 --- a/vendor/github.com/jinzhu/gorm/callback_row_query.go +++ /dev/null @@ -1,41 +0,0 @@ -package gorm - -import ( - "database/sql" - "fmt" -) - -// Define callbacks for row query -func init() { - DefaultCallback.RowQuery().Register("gorm:row_query", rowQueryCallback) -} - -type RowQueryResult struct { - Row *sql.Row -} - -type RowsQueryResult struct { - Rows *sql.Rows - Error error -} - -// queryCallback used to query data from database -func rowQueryCallback(scope *Scope) { - if result, ok := scope.InstanceGet("row_query_result"); ok { - scope.prepareQuerySQL() - - if str, ok := scope.Get("gorm:query_hint"); ok { - scope.SQL = fmt.Sprint(str) + scope.SQL - } - - if str, ok := scope.Get("gorm:query_option"); ok { - scope.SQL += addExtraSpaceIfExist(fmt.Sprint(str)) - } - - if rowResult, ok := result.(*RowQueryResult); ok { - rowResult.Row = scope.SQLDB().QueryRow(scope.SQL, scope.SQLVars...) - } else if rowsResult, ok := result.(*RowsQueryResult); ok { - rowsResult.Rows, rowsResult.Error = scope.SQLDB().Query(scope.SQL, scope.SQLVars...) - } - } -} diff --git a/vendor/github.com/jinzhu/gorm/callback_save.go b/vendor/github.com/jinzhu/gorm/callback_save.go deleted file mode 100644 index 3b4e058..0000000 --- a/vendor/github.com/jinzhu/gorm/callback_save.go +++ /dev/null @@ -1,170 +0,0 @@ -package gorm - -import ( - "reflect" - "strings" -) - -func beginTransactionCallback(scope *Scope) { - scope.Begin() -} - -func commitOrRollbackTransactionCallback(scope *Scope) { - scope.CommitOrRollback() -} - -func saveAssociationCheck(scope *Scope, field *Field) (autoUpdate bool, autoCreate bool, saveReference bool, r *Relationship) { - checkTruth := func(value interface{}) bool { - if v, ok := value.(bool); ok && !v { - return false - } - - if v, ok := value.(string); ok { - v = strings.ToLower(v) - return v == "true" - } - - return true - } - - if scope.changeableField(field) && !field.IsBlank && !field.IsIgnored { - if r = field.Relationship; r != nil { - autoUpdate, autoCreate, saveReference = true, true, true - - if value, ok := scope.Get("gorm:save_associations"); ok { - autoUpdate = checkTruth(value) - autoCreate = autoUpdate - saveReference = autoUpdate - } else if value, ok := field.TagSettingsGet("SAVE_ASSOCIATIONS"); ok { - autoUpdate = checkTruth(value) - autoCreate = autoUpdate - saveReference = autoUpdate - } - - if value, ok := scope.Get("gorm:association_autoupdate"); ok { - autoUpdate = checkTruth(value) - } else if value, ok := field.TagSettingsGet("ASSOCIATION_AUTOUPDATE"); ok { - autoUpdate = checkTruth(value) - } - - if value, ok := scope.Get("gorm:association_autocreate"); ok { - autoCreate = checkTruth(value) - } else if value, ok := field.TagSettingsGet("ASSOCIATION_AUTOCREATE"); ok { - autoCreate = checkTruth(value) - } - - if value, ok := scope.Get("gorm:association_save_reference"); ok { - saveReference = checkTruth(value) - } else if value, ok := field.TagSettingsGet("ASSOCIATION_SAVE_REFERENCE"); ok { - saveReference = checkTruth(value) - } - } - } - - return -} - -func saveBeforeAssociationsCallback(scope *Scope) { - for _, field := range scope.Fields() { - autoUpdate, autoCreate, saveReference, relationship := saveAssociationCheck(scope, field) - - if relationship != nil && relationship.Kind == "belongs_to" { - fieldValue := field.Field.Addr().Interface() - newScope := 
scope.New(fieldValue) - - if newScope.PrimaryKeyZero() { - if autoCreate { - scope.Err(scope.NewDB().Save(fieldValue).Error) - } - } else if autoUpdate { - scope.Err(scope.NewDB().Save(fieldValue).Error) - } - - if saveReference { - if len(relationship.ForeignFieldNames) != 0 { - // set value's foreign key - for idx, fieldName := range relationship.ForeignFieldNames { - associationForeignName := relationship.AssociationForeignDBNames[idx] - if foreignField, ok := scope.New(fieldValue).FieldByName(associationForeignName); ok { - scope.Err(scope.SetColumn(fieldName, foreignField.Field.Interface())) - } - } - } - } - } - } -} - -func saveAfterAssociationsCallback(scope *Scope) { - for _, field := range scope.Fields() { - autoUpdate, autoCreate, saveReference, relationship := saveAssociationCheck(scope, field) - - if relationship != nil && (relationship.Kind == "has_one" || relationship.Kind == "has_many" || relationship.Kind == "many_to_many") { - value := field.Field - - switch value.Kind() { - case reflect.Slice: - for i := 0; i < value.Len(); i++ { - newDB := scope.NewDB() - elem := value.Index(i).Addr().Interface() - newScope := newDB.NewScope(elem) - - if saveReference { - if relationship.JoinTableHandler == nil && len(relationship.ForeignFieldNames) != 0 { - for idx, fieldName := range relationship.ForeignFieldNames { - associationForeignName := relationship.AssociationForeignDBNames[idx] - if f, ok := scope.FieldByName(associationForeignName); ok { - scope.Err(newScope.SetColumn(fieldName, f.Field.Interface())) - } - } - } - - if relationship.PolymorphicType != "" { - scope.Err(newScope.SetColumn(relationship.PolymorphicType, relationship.PolymorphicValue)) - } - } - - if newScope.PrimaryKeyZero() { - if autoCreate { - scope.Err(newDB.Save(elem).Error) - } - } else if autoUpdate { - scope.Err(newDB.Save(elem).Error) - } - - if !scope.New(newScope.Value).PrimaryKeyZero() && saveReference { - if joinTableHandler := relationship.JoinTableHandler; joinTableHandler != nil { - scope.Err(joinTableHandler.Add(joinTableHandler, newDB, scope.Value, newScope.Value)) - } - } - } - default: - elem := value.Addr().Interface() - newScope := scope.New(elem) - - if saveReference { - if len(relationship.ForeignFieldNames) != 0 { - for idx, fieldName := range relationship.ForeignFieldNames { - associationForeignName := relationship.AssociationForeignDBNames[idx] - if f, ok := scope.FieldByName(associationForeignName); ok { - scope.Err(newScope.SetColumn(fieldName, f.Field.Interface())) - } - } - } - - if relationship.PolymorphicType != "" { - scope.Err(newScope.SetColumn(relationship.PolymorphicType, relationship.PolymorphicValue)) - } - } - - if newScope.PrimaryKeyZero() { - if autoCreate { - scope.Err(scope.NewDB().Save(elem).Error) - } - } else if autoUpdate { - scope.Err(scope.NewDB().Save(elem).Error) - } - } - } - } -} diff --git a/vendor/github.com/jinzhu/gorm/callback_update.go b/vendor/github.com/jinzhu/gorm/callback_update.go deleted file mode 100644 index 699e534..0000000 --- a/vendor/github.com/jinzhu/gorm/callback_update.go +++ /dev/null @@ -1,121 +0,0 @@ -package gorm - -import ( - "errors" - "fmt" - "sort" - "strings" -) - -// Define callbacks for updating -func init() { - DefaultCallback.Update().Register("gorm:assign_updating_attributes", assignUpdatingAttributesCallback) - DefaultCallback.Update().Register("gorm:begin_transaction", beginTransactionCallback) - DefaultCallback.Update().Register("gorm:before_update", beforeUpdateCallback) - 
DefaultCallback.Update().Register("gorm:save_before_associations", saveBeforeAssociationsCallback) - DefaultCallback.Update().Register("gorm:update_time_stamp", updateTimeStampForUpdateCallback) - DefaultCallback.Update().Register("gorm:update", updateCallback) - DefaultCallback.Update().Register("gorm:save_after_associations", saveAfterAssociationsCallback) - DefaultCallback.Update().Register("gorm:after_update", afterUpdateCallback) - DefaultCallback.Update().Register("gorm:commit_or_rollback_transaction", commitOrRollbackTransactionCallback) -} - -// assignUpdatingAttributesCallback assign updating attributes to model -func assignUpdatingAttributesCallback(scope *Scope) { - if attrs, ok := scope.InstanceGet("gorm:update_interface"); ok { - if updateMaps, hasUpdate := scope.updatedAttrsWithValues(attrs); hasUpdate { - scope.InstanceSet("gorm:update_attrs", updateMaps) - } else { - scope.SkipLeft() - } - } -} - -// beforeUpdateCallback will invoke `BeforeSave`, `BeforeUpdate` method before updating -func beforeUpdateCallback(scope *Scope) { - if scope.DB().HasBlockGlobalUpdate() && !scope.hasConditions() { - scope.Err(errors.New("missing WHERE clause while updating")) - return - } - if _, ok := scope.Get("gorm:update_column"); !ok { - if !scope.HasError() { - scope.CallMethod("BeforeSave") - } - if !scope.HasError() { - scope.CallMethod("BeforeUpdate") - } - } -} - -// updateTimeStampForUpdateCallback will set `UpdatedAt` when updating -func updateTimeStampForUpdateCallback(scope *Scope) { - if _, ok := scope.Get("gorm:update_column"); !ok { - scope.SetColumn("UpdatedAt", scope.db.nowFunc()) - } -} - -// updateCallback the callback used to update data to database -func updateCallback(scope *Scope) { - if !scope.HasError() { - var sqls []string - - if updateAttrs, ok := scope.InstanceGet("gorm:update_attrs"); ok { - // Sort the column names so that the generated SQL is the same every time. 
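// (Go deliberately randomizes map iteration order, so without this sort the
// column order, and therefore the generated statement text, could differ
// between runs of the same update.)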
- updateMap := updateAttrs.(map[string]interface{}) - var columns []string - for c := range updateMap { - columns = append(columns, c) - } - sort.Strings(columns) - - for _, column := range columns { - value := updateMap[column] - sqls = append(sqls, fmt.Sprintf("%v = %v", scope.Quote(column), scope.AddToVars(value))) - } - } else { - for _, field := range scope.Fields() { - if scope.changeableField(field) { - if !field.IsPrimaryKey && field.IsNormal && (field.Name != "CreatedAt" || !field.IsBlank) { - if !field.IsForeignKey || !field.IsBlank || !field.HasDefaultValue { - sqls = append(sqls, fmt.Sprintf("%v = %v", scope.Quote(field.DBName), scope.AddToVars(field.Field.Interface()))) - } - } else if relationship := field.Relationship; relationship != nil && relationship.Kind == "belongs_to" { - for _, foreignKey := range relationship.ForeignDBNames { - if foreignField, ok := scope.FieldByName(foreignKey); ok && !scope.changeableField(foreignField) { - sqls = append(sqls, - fmt.Sprintf("%v = %v", scope.Quote(foreignField.DBName), scope.AddToVars(foreignField.Field.Interface()))) - } - } - } - } - } - } - - var extraOption string - if str, ok := scope.Get("gorm:update_option"); ok { - extraOption = fmt.Sprint(str) - } - - if len(sqls) > 0 { - scope.Raw(fmt.Sprintf( - "UPDATE %v SET %v%v%v", - scope.QuotedTableName(), - strings.Join(sqls, ", "), - addExtraSpaceIfExist(scope.CombinedConditionSql()), - addExtraSpaceIfExist(extraOption), - )).Exec() - } - } -} - -// afterUpdateCallback will invoke `AfterUpdate`, `AfterSave` method after updating -func afterUpdateCallback(scope *Scope) { - if _, ok := scope.Get("gorm:update_column"); !ok { - if !scope.HasError() { - scope.CallMethod("AfterUpdate") - } - if !scope.HasError() { - scope.CallMethod("AfterSave") - } - } -} diff --git a/vendor/github.com/jinzhu/gorm/dialect.go b/vendor/github.com/jinzhu/gorm/dialect.go deleted file mode 100644 index 749587f..0000000 --- a/vendor/github.com/jinzhu/gorm/dialect.go +++ /dev/null @@ -1,147 +0,0 @@ -package gorm - -import ( - "database/sql" - "fmt" - "reflect" - "strconv" - "strings" -) - -// Dialect interface contains behaviors that differ across SQL database -type Dialect interface { - // GetName get dialect's name - GetName() string - - // SetDB set db for dialect - SetDB(db SQLCommon) - - // BindVar return the placeholder for actual values in SQL statements, in many dbs it is "?", Postgres using $1 - BindVar(i int) string - // Quote quotes field name to avoid SQL parsing exceptions by using a reserved word as a field name - Quote(key string) string - // DataTypeOf return data's sql type - DataTypeOf(field *StructField) string - - // HasIndex check has index or not - HasIndex(tableName string, indexName string) bool - // HasForeignKey check has foreign key or not - HasForeignKey(tableName string, foreignKeyName string) bool - // RemoveIndex remove index - RemoveIndex(tableName string, indexName string) error - // HasTable check has table or not - HasTable(tableName string) bool - // HasColumn check has column or not - HasColumn(tableName string, columnName string) bool - // ModifyColumn modify column's type - ModifyColumn(tableName string, columnName string, typ string) error - - // LimitAndOffsetSQL return generated SQL with Limit and Offset, as mssql has special case - LimitAndOffsetSQL(limit, offset interface{}) (string, error) - // SelectFromDummyTable return select values, for most dbs, `SELECT values` just works, mysql needs `SELECT value FROM DUAL` - SelectFromDummyTable() string - // 
LastInsertIDOutputInterstitial most dbs support LastInsertId, but mssql needs to use `OUTPUT` - LastInsertIDOutputInterstitial(tableName, columnName string, columns []string) string - // LastInsertIDReturningSuffix most dbs support LastInsertId, but postgres needs to use `RETURNING` - LastInsertIDReturningSuffix(tableName, columnName string) string - // DefaultValueStr returns the expression used to insert a row with only default values - DefaultValueStr() string - - // BuildKeyName returns a valid key name (foreign key, index key) for the given table, field and reference - BuildKeyName(kind, tableName string, fields ...string) string - - // NormalizeIndexAndColumn returns a valid index name and column name depending on the dialect - NormalizeIndexAndColumn(indexName, columnName string) (string, string) - - // CurrentDatabase returns the current database name - CurrentDatabase() string -} - -var dialectsMap = map[string]Dialect{} - -func newDialect(name string, db SQLCommon) Dialect { - if value, ok := dialectsMap[name]; ok { - dialect := reflect.New(reflect.TypeOf(value).Elem()).Interface().(Dialect) - dialect.SetDB(db) - return dialect - } - - fmt.Printf("`%v` is not officially supported, running under compatibility mode.\n", name) - dialect := &commonDialect{} - dialect.SetDB(db) - return dialect -} - -// RegisterDialect registers a new dialect -func RegisterDialect(name string, dialect Dialect) { - dialectsMap[name] = dialect -} - -// GetDialect gets the dialect for the specified dialect name -func GetDialect(name string) (dialect Dialect, ok bool) { - dialect, ok = dialectsMap[name] - return -} - -// ParseFieldStructForDialect gets the field's sql data type -var ParseFieldStructForDialect = func(field *StructField, dialect Dialect) (fieldValue reflect.Value, sqlType string, size int, additionalType string) { - // Get redirected field type - var ( - reflectType = field.Struct.Type - dataType, _ = field.TagSettingsGet("TYPE") - ) - - for reflectType.Kind() == reflect.Ptr { - reflectType = reflectType.Elem() - } - - // Get redirected field value - fieldValue = reflect.Indirect(reflect.New(reflectType)) - - if gormDataType, ok := fieldValue.Interface().(interface { - GormDataType(Dialect) string - }); ok { - dataType = gormDataType.GormDataType(dialect) - } - - // Get scanner's real value - if dataType == "" { - var getScannerValue func(reflect.Value) - getScannerValue = func(value reflect.Value) { - fieldValue = value - if _, isScanner := reflect.New(fieldValue.Type()).Interface().(sql.Scanner); isScanner && fieldValue.Kind() == reflect.Struct { - getScannerValue(fieldValue.Field(0)) - } - } - getScannerValue(fieldValue) - } - - // Default Size - if num, ok := field.TagSettingsGet("SIZE"); ok { - size, _ = strconv.Atoi(num) - } else { - size = 255 - } - - // Default type from tag setting - notNull, _ := field.TagSettingsGet("NOT NULL") - unique, _ := field.TagSettingsGet("UNIQUE") - additionalType = notNull + " " + unique - if value, ok := field.TagSettingsGet("DEFAULT"); ok { - additionalType = additionalType + " DEFAULT " + value - } - - if value, ok := field.TagSettingsGet("COMMENT"); ok { - additionalType = additionalType + " COMMENT " + value - } - - return fieldValue, dataType, size, strings.TrimSpace(additionalType) -} - -func currentDatabaseAndTable(dialect Dialect, tableName string) (string, string) { - if strings.Contains(tableName, ".") { - splitStrings := strings.SplitN(tableName, ".", 2) - return splitStrings[0], splitStrings[1] - } - return dialect.CurrentDatabase(), tableName -} diff --git 
a/vendor/github.com/jinzhu/gorm/dialect_common.go b/vendor/github.com/jinzhu/gorm/dialect_common.go deleted file mode 100644 index d549510..0000000 --- a/vendor/github.com/jinzhu/gorm/dialect_common.go +++ /dev/null @@ -1,196 +0,0 @@ -package gorm - -import ( - "fmt" - "reflect" - "regexp" - "strconv" - "strings" - "time" -) - -var keyNameRegex = regexp.MustCompile("[^a-zA-Z0-9]+") - -// DefaultForeignKeyNamer contains the default foreign key name generator method -type DefaultForeignKeyNamer struct { -} - -type commonDialect struct { - db SQLCommon - DefaultForeignKeyNamer -} - -func init() { - RegisterDialect("common", &commonDialect{}) -} - -func (commonDialect) GetName() string { - return "common" -} - -func (s *commonDialect) SetDB(db SQLCommon) { - s.db = db -} - -func (commonDialect) BindVar(i int) string { - return "$$$" // ? -} - -func (commonDialect) Quote(key string) string { - return fmt.Sprintf(`"%s"`, key) -} - -func (s *commonDialect) fieldCanAutoIncrement(field *StructField) bool { - if value, ok := field.TagSettingsGet("AUTO_INCREMENT"); ok { - return strings.ToLower(value) != "false" - } - return field.IsPrimaryKey -} - -func (s *commonDialect) DataTypeOf(field *StructField) string { - var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s) - - if sqlType == "" { - switch dataValue.Kind() { - case reflect.Bool: - sqlType = "BOOLEAN" - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr: - if s.fieldCanAutoIncrement(field) { - sqlType = "INTEGER AUTO_INCREMENT" - } else { - sqlType = "INTEGER" - } - case reflect.Int64, reflect.Uint64: - if s.fieldCanAutoIncrement(field) { - sqlType = "BIGINT AUTO_INCREMENT" - } else { - sqlType = "BIGINT" - } - case reflect.Float32, reflect.Float64: - sqlType = "FLOAT" - case reflect.String: - if size > 0 && size < 65532 { - sqlType = fmt.Sprintf("VARCHAR(%d)", size) - } else { - sqlType = "VARCHAR(65532)" - } - case reflect.Struct: - if _, ok := dataValue.Interface().(time.Time); ok { - sqlType = "TIMESTAMP" - } - default: - if _, ok := dataValue.Interface().([]byte); ok { - if size > 0 && size < 65532 { - sqlType = fmt.Sprintf("BINARY(%d)", size) - } else { - sqlType = "BINARY(65532)" - } - } - } - } - - if sqlType == "" { - panic(fmt.Sprintf("invalid sql type %s (%s) for commonDialect", dataValue.Type().Name(), dataValue.Kind().String())) - } - - if strings.TrimSpace(additionalType) == "" { - return sqlType - } - return fmt.Sprintf("%v %v", sqlType, additionalType) -} - -func (s commonDialect) HasIndex(tableName string, indexName string) bool { - var count int - currentDatabase, tableName := currentDatabaseAndTable(&s, tableName) - s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.STATISTICS WHERE table_schema = ? AND table_name = ? AND index_name = ?", currentDatabase, tableName, indexName).Scan(&count) - return count > 0 -} - -func (s commonDialect) RemoveIndex(tableName string, indexName string) error { - _, err := s.db.Exec(fmt.Sprintf("DROP INDEX %v", indexName)) - return err -} - -func (s commonDialect) HasForeignKey(tableName string, foreignKeyName string) bool { - return false -} - -func (s commonDialect) HasTable(tableName string) bool { - var count int - currentDatabase, tableName := currentDatabaseAndTable(&s, tableName) - s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = ? 
AND table_name = ?", currentDatabase, tableName).Scan(&count) - return count > 0 -} - -func (s commonDialect) HasColumn(tableName string, columnName string) bool { - var count int - currentDatabase, tableName := currentDatabaseAndTable(&s, tableName) - s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = ? AND table_name = ? AND column_name = ?", currentDatabase, tableName, columnName).Scan(&count) - return count > 0 -} - -func (s commonDialect) ModifyColumn(tableName string, columnName string, typ string) error { - _, err := s.db.Exec(fmt.Sprintf("ALTER TABLE %v ALTER COLUMN %v TYPE %v", tableName, columnName, typ)) - return err -} - -func (s commonDialect) CurrentDatabase() (name string) { - s.db.QueryRow("SELECT DATABASE()").Scan(&name) - return -} - -// LimitAndOffsetSQL return generated SQL with Limit and Offset -func (s commonDialect) LimitAndOffsetSQL(limit, offset interface{}) (sql string, err error) { - if limit != nil { - if parsedLimit, err := s.parseInt(limit); err != nil { - return "", err - } else if parsedLimit >= 0 { - sql += fmt.Sprintf(" LIMIT %d", parsedLimit) - } - } - if offset != nil { - if parsedOffset, err := s.parseInt(offset); err != nil { - return "", err - } else if parsedOffset >= 0 { - sql += fmt.Sprintf(" OFFSET %d", parsedOffset) - } - } - return -} - -func (commonDialect) SelectFromDummyTable() string { - return "" -} - -func (commonDialect) LastInsertIDOutputInterstitial(tableName, columnName string, columns []string) string { - return "" -} - -func (commonDialect) LastInsertIDReturningSuffix(tableName, columnName string) string { - return "" -} - -func (commonDialect) DefaultValueStr() string { - return "DEFAULT VALUES" -} - -// BuildKeyName returns a valid key name (foreign key, index key) for the given table, field and reference -func (DefaultForeignKeyNamer) BuildKeyName(kind, tableName string, fields ...string) string { - keyName := fmt.Sprintf("%s_%s_%s", kind, tableName, strings.Join(fields, "_")) - keyName = keyNameRegex.ReplaceAllString(keyName, "_") - return keyName -} - -// NormalizeIndexAndColumn returns argument's index name and column name without doing anything -func (commonDialect) NormalizeIndexAndColumn(indexName, columnName string) (string, string) { - return indexName, columnName -} - -func (commonDialect) parseInt(value interface{}) (int64, error) { - return strconv.ParseInt(fmt.Sprint(value), 0, 0) -} - -// IsByteArrayOrSlice returns true of the reflected value is an array or slice -func IsByteArrayOrSlice(value reflect.Value) bool { - return (value.Kind() == reflect.Array || value.Kind() == reflect.Slice) && value.Type().Elem() == reflect.TypeOf(uint8(0)) -} diff --git a/vendor/github.com/jinzhu/gorm/dialect_mysql.go b/vendor/github.com/jinzhu/gorm/dialect_mysql.go deleted file mode 100644 index b4467ff..0000000 --- a/vendor/github.com/jinzhu/gorm/dialect_mysql.go +++ /dev/null @@ -1,246 +0,0 @@ -package gorm - -import ( - "crypto/sha1" - "database/sql" - "fmt" - "reflect" - "regexp" - "strings" - "time" - "unicode/utf8" -) - -var mysqlIndexRegex = regexp.MustCompile(`^(.+)\((\d+)\)$`) - -type mysql struct { - commonDialect -} - -func init() { - RegisterDialect("mysql", &mysql{}) -} - -func (mysql) GetName() string { - return "mysql" -} - -func (mysql) Quote(key string) string { - return fmt.Sprintf("`%s`", key) -} - -// Get Data Type for MySQL Dialect -func (s *mysql) DataTypeOf(field *StructField) string { - var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s) - - // 
MySQL allows only one auto increment column per table, and it must - // be a KEY column. - if _, ok := field.TagSettingsGet("AUTO_INCREMENT"); ok { - if _, ok = field.TagSettingsGet("INDEX"); !ok && !field.IsPrimaryKey { - field.TagSettingsDelete("AUTO_INCREMENT") - } - } - - if sqlType == "" { - switch dataValue.Kind() { - case reflect.Bool: - sqlType = "boolean" - case reflect.Int8: - if s.fieldCanAutoIncrement(field) { - field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT") - sqlType = "tinyint AUTO_INCREMENT" - } else { - sqlType = "tinyint" - } - case reflect.Int, reflect.Int16, reflect.Int32: - if s.fieldCanAutoIncrement(field) { - field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT") - sqlType = "int AUTO_INCREMENT" - } else { - sqlType = "int" - } - case reflect.Uint8: - if s.fieldCanAutoIncrement(field) { - field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT") - sqlType = "tinyint unsigned AUTO_INCREMENT" - } else { - sqlType = "tinyint unsigned" - } - case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uintptr: - if s.fieldCanAutoIncrement(field) { - field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT") - sqlType = "int unsigned AUTO_INCREMENT" - } else { - sqlType = "int unsigned" - } - case reflect.Int64: - if s.fieldCanAutoIncrement(field) { - field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT") - sqlType = "bigint AUTO_INCREMENT" - } else { - sqlType = "bigint" - } - case reflect.Uint64: - if s.fieldCanAutoIncrement(field) { - field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT") - sqlType = "bigint unsigned AUTO_INCREMENT" - } else { - sqlType = "bigint unsigned" - } - case reflect.Float32, reflect.Float64: - sqlType = "double" - case reflect.String: - if size > 0 && size < 65532 { - sqlType = fmt.Sprintf("varchar(%d)", size) - } else { - sqlType = "longtext" - } - case reflect.Struct: - if _, ok := dataValue.Interface().(time.Time); ok { - precision := "" - if p, ok := field.TagSettingsGet("PRECISION"); ok { - precision = fmt.Sprintf("(%s)", p) - } - - if _, ok := field.TagSettings["NOT NULL"]; ok || field.IsPrimaryKey { - sqlType = fmt.Sprintf("DATETIME%v", precision) - } else { - sqlType = fmt.Sprintf("DATETIME%v NULL", precision) - } - } - default: - if IsByteArrayOrSlice(dataValue) { - if size > 0 && size < 65532 { - sqlType = fmt.Sprintf("varbinary(%d)", size) - } else { - sqlType = "longblob" - } - } - } - } - - if sqlType == "" { - panic(fmt.Sprintf("invalid sql type %s (%s) in field %s for mysql", dataValue.Type().Name(), dataValue.Kind().String(), field.Name)) - } - - if strings.TrimSpace(additionalType) == "" { - return sqlType - } - return fmt.Sprintf("%v %v", sqlType, additionalType) -} - -func (s mysql) RemoveIndex(tableName string, indexName string) error { - _, err := s.db.Exec(fmt.Sprintf("DROP INDEX %v ON %v", indexName, s.Quote(tableName))) - return err -} - -func (s mysql) ModifyColumn(tableName string, columnName string, typ string) error { - _, err := s.db.Exec(fmt.Sprintf("ALTER TABLE %v MODIFY COLUMN %v %v", tableName, columnName, typ)) - return err -} - -func (s mysql) LimitAndOffsetSQL(limit, offset interface{}) (sql string, err error) { - if limit != nil { - parsedLimit, err := s.parseInt(limit) - if err != nil { - return "", err - } - if parsedLimit >= 0 { - sql += fmt.Sprintf(" LIMIT %d", parsedLimit) - - if offset != nil { - parsedOffset, err := s.parseInt(offset) - if err != nil { - return "", err - } - if parsedOffset >= 0 { - sql += fmt.Sprintf(" OFFSET %d", parsedOffset) - } - } - } - } - return -} - -func (s mysql) 
HasForeignKey(tableName string, foreignKeyName string) bool { - var count int - currentDatabase, tableName := currentDatabaseAndTable(&s, tableName) - s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_SCHEMA=? AND TABLE_NAME=? AND CONSTRAINT_NAME=? AND CONSTRAINT_TYPE='FOREIGN KEY'", currentDatabase, tableName, foreignKeyName).Scan(&count) - return count > 0 -} - -func (s mysql) HasTable(tableName string) bool { - currentDatabase, tableName := currentDatabaseAndTable(&s, tableName) - var name string - // allow mysql database name with '-' character - if err := s.db.QueryRow(fmt.Sprintf("SHOW TABLES FROM `%s` WHERE `Tables_in_%s` = ?", currentDatabase, currentDatabase), tableName).Scan(&name); err != nil { - if err == sql.ErrNoRows { - return false - } - panic(err) - } else { - return true - } -} - -func (s mysql) HasIndex(tableName string, indexName string) bool { - currentDatabase, tableName := currentDatabaseAndTable(&s, tableName) - if rows, err := s.db.Query(fmt.Sprintf("SHOW INDEXES FROM `%s` FROM `%s` WHERE Key_name = ?", tableName, currentDatabase), indexName); err != nil { - panic(err) - } else { - defer rows.Close() - return rows.Next() - } -} - -func (s mysql) HasColumn(tableName string, columnName string) bool { - currentDatabase, tableName := currentDatabaseAndTable(&s, tableName) - if rows, err := s.db.Query(fmt.Sprintf("SHOW COLUMNS FROM `%s` FROM `%s` WHERE Field = ?", tableName, currentDatabase), columnName); err != nil { - panic(err) - } else { - defer rows.Close() - return rows.Next() - } -} - -func (s mysql) CurrentDatabase() (name string) { - s.db.QueryRow("SELECT DATABASE()").Scan(&name) - return -} - -func (mysql) SelectFromDummyTable() string { - return "FROM DUAL" -} - -func (s mysql) BuildKeyName(kind, tableName string, fields ...string) string { - keyName := s.commonDialect.BuildKeyName(kind, tableName, fields...) 
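// MySQL caps identifier names at 64 characters, so generated key names longer
// than that are shortened below by hashing the full name with sha1.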
- if utf8.RuneCountInString(keyName) <= 64 { - return keyName - } - h := sha1.New() - h.Write([]byte(keyName)) - bs := h.Sum(nil) - - // sha1 is 40 characters, keep first 24 characters of destination - destRunes := []rune(keyNameRegex.ReplaceAllString(fields[0], "_")) - if len(destRunes) > 24 { - destRunes = destRunes[:24] - } - - return fmt.Sprintf("%s%x", string(destRunes), bs) -} - -// NormalizeIndexAndColumn returns the index name and column name for specifying an index prefix length if needed -func (mysql) NormalizeIndexAndColumn(indexName, columnName string) (string, string) { - submatch := mysqlIndexRegex.FindStringSubmatch(indexName) - if len(submatch) != 3 { - return indexName, columnName - } - indexName = submatch[1] - columnName = fmt.Sprintf("%s(%s)", columnName, submatch[2]) - return indexName, columnName -} - -func (mysql) DefaultValueStr() string { - return "VALUES()" -} diff --git a/vendor/github.com/jinzhu/gorm/dialect_postgres.go b/vendor/github.com/jinzhu/gorm/dialect_postgres.go deleted file mode 100644 index d2df313..0000000 --- a/vendor/github.com/jinzhu/gorm/dialect_postgres.go +++ /dev/null @@ -1,147 +0,0 @@ -package gorm - -import ( - "encoding/json" - "fmt" - "reflect" - "strings" - "time" -) - -type postgres struct { - commonDialect -} - -func init() { - RegisterDialect("postgres", &postgres{}) - RegisterDialect("cloudsqlpostgres", &postgres{}) -} - -func (postgres) GetName() string { - return "postgres" } - -func (postgres) BindVar(i int) string { - return fmt.Sprintf("$%v", i) -} - -func (s *postgres) DataTypeOf(field *StructField) string { - var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s) - - if sqlType == "" { - switch dataValue.Kind() { - case reflect.Bool: - sqlType = "boolean" - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uintptr: - if s.fieldCanAutoIncrement(field) { - field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT") - sqlType = "serial" - } else { - sqlType = "integer" - } - case reflect.Int64, reflect.Uint32, reflect.Uint64: - if s.fieldCanAutoIncrement(field) { - field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT") - sqlType = "bigserial" - } else { - sqlType = "bigint" - } - case reflect.Float32, reflect.Float64: - sqlType = "numeric" - case reflect.String: - if _, ok := field.TagSettingsGet("SIZE"); !ok { - size = 0 // if SIZE hasn't been set, use `text` as the default type, as there is no performance difference - } - - if size > 0 && size < 65532 { - sqlType = fmt.Sprintf("varchar(%d)", size) - } else { - sqlType = "text" - } - case reflect.Struct: - if _, ok := dataValue.Interface().(time.Time); ok { - sqlType = "timestamp with time zone" - } - case reflect.Map: - if dataValue.Type().Name() == "Hstore" { - sqlType = "hstore" - } - default: - if IsByteArrayOrSlice(dataValue) { - sqlType = "bytea" - - if isUUID(dataValue) { - sqlType = "uuid" - } - - if isJSON(dataValue) { - sqlType = "jsonb" - } - } - } - } - - if sqlType == "" { - panic(fmt.Sprintf("invalid sql type %s (%s) for postgres", dataValue.Type().Name(), dataValue.Kind().String())) - } - - if strings.TrimSpace(additionalType) == "" { - return sqlType - } - return fmt.Sprintf("%v %v", sqlType, additionalType) -} - -func (s postgres) HasIndex(tableName string, indexName string) bool { - var count int - s.db.QueryRow("SELECT count(*) FROM pg_indexes WHERE tablename = $1 AND indexname = $2 AND schemaname = CURRENT_SCHEMA()", tableName, indexName).Scan(&count) - return count > 0 -}
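// The dialects in this package all follow the same pattern: embed
// commonDialect, override only the behavior that differs, and self-register
// from init() so newDialect can look the implementation up by name. A minimal
// sketch, assuming a hypothetical "mydb" backend whose only quirk is
// bracket-quoted identifiers (every other method falls back to commonDialect):

type mydb struct {
	commonDialect
}

func init() {
	RegisterDialect("mydb", &mydb{})
}

func (mydb) GetName() string {
	return "mydb"
}

// Quote overrides identifier quoting; gorm.Open("mydb", ...) would pick this
// dialect up through the registry.
func (mydb) Quote(key string) string {
	return fmt.Sprintf("[%s]", key)
}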
- -func (s postgres) HasForeignKey(tableName string, foreignKeyName string) bool { - var count int - s.db.QueryRow("SELECT count(con.conname) FROM pg_constraint con WHERE $1::regclass::oid = con.conrelid AND con.conname = $2 AND con.contype='f'", tableName, foreignKeyName).Scan(&count) - return count > 0 -} - -func (s postgres) HasTable(tableName string) bool { - var count int - s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.tables WHERE table_name = $1 AND table_type = 'BASE TABLE' AND table_schema = CURRENT_SCHEMA()", tableName).Scan(&count) - return count > 0 -} - -func (s postgres) HasColumn(tableName string, columnName string) bool { - var count int - s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.columns WHERE table_name = $1 AND column_name = $2 AND table_schema = CURRENT_SCHEMA()", tableName, columnName).Scan(&count) - return count > 0 -} - -func (s postgres) CurrentDatabase() (name string) { - s.db.QueryRow("SELECT CURRENT_DATABASE()").Scan(&name) - return -} - -func (s postgres) LastInsertIDOutputInterstitial(tableName, key string, columns []string) string { - return "" -} - -func (s postgres) LastInsertIDReturningSuffix(tableName, key string) string { - return fmt.Sprintf("RETURNING %v.%v", tableName, key) -} - -func (postgres) SupportLastInsertID() bool { - return false -} - -func isUUID(value reflect.Value) bool { - if value.Kind() != reflect.Array || value.Type().Len() != 16 { - return false - } - typename := value.Type().Name() - lower := strings.ToLower(typename) - return "uuid" == lower || "guid" == lower -} - -func isJSON(value reflect.Value) bool { - _, ok := value.Interface().(json.RawMessage) - return ok -} diff --git a/vendor/github.com/jinzhu/gorm/dialect_sqlite3.go b/vendor/github.com/jinzhu/gorm/dialect_sqlite3.go deleted file mode 100644 index 5f96c36..0000000 --- a/vendor/github.com/jinzhu/gorm/dialect_sqlite3.go +++ /dev/null @@ -1,107 +0,0 @@ -package gorm - -import ( - "fmt" - "reflect" - "strings" - "time" -) - -type sqlite3 struct { - commonDialect -} - -func init() { - RegisterDialect("sqlite3", &sqlite3{}) -} - -func (sqlite3) GetName() string { - return "sqlite3" -} - -// Get Data Type for Sqlite Dialect -func (s *sqlite3) DataTypeOf(field *StructField) string { - var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s) - - if sqlType == "" { - switch dataValue.Kind() { - case reflect.Bool: - sqlType = "bool" - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr: - if s.fieldCanAutoIncrement(field) { - field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT") - sqlType = "integer primary key autoincrement" - } else { - sqlType = "integer" - } - case reflect.Int64, reflect.Uint64: - if s.fieldCanAutoIncrement(field) { - field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT") - sqlType = "integer primary key autoincrement" - } else { - sqlType = "bigint" - } - case reflect.Float32, reflect.Float64: - sqlType = "real" - case reflect.String: - if size > 0 && size < 65532 { - sqlType = fmt.Sprintf("varchar(%d)", size) - } else { - sqlType = "text" - } - case reflect.Struct: - if _, ok := dataValue.Interface().(time.Time); ok { - sqlType = "datetime" - } - default: - if IsByteArrayOrSlice(dataValue) { - sqlType = "blob" - } - } - } - - if sqlType == "" { - panic(fmt.Sprintf("invalid sql type %s (%s) for sqlite3", dataValue.Type().Name(), dataValue.Kind().String())) - } - - if strings.TrimSpace(additionalType) == "" { - return sqlType 
- } - return fmt.Sprintf("%v %v", sqlType, additionalType) -} - -func (s sqlite3) HasIndex(tableName string, indexName string) bool { - var count int - s.db.QueryRow(fmt.Sprintf("SELECT count(*) FROM sqlite_master WHERE tbl_name = ? AND sql LIKE '%%INDEX %v ON%%'", indexName), tableName).Scan(&count) - return count > 0 -} - -func (s sqlite3) HasTable(tableName string) bool { - var count int - s.db.QueryRow("SELECT count(*) FROM sqlite_master WHERE type='table' AND name=?", tableName).Scan(&count) - return count > 0 -} - -func (s sqlite3) HasColumn(tableName string, columnName string) bool { - var count int - s.db.QueryRow(fmt.Sprintf("SELECT count(*) FROM sqlite_master WHERE tbl_name = ? AND (sql LIKE '%%\"%v\" %%' OR sql LIKE '%%%v %%');\n", columnName, columnName), tableName).Scan(&count) - return count > 0 -} - -func (s sqlite3) CurrentDatabase() (name string) { - var ( - ifaces = make([]interface{}, 3) - pointers = make([]*string, 3) - i int - ) - for i = 0; i < 3; i++ { - ifaces[i] = &pointers[i] - } - if err := s.db.QueryRow("PRAGMA database_list").Scan(ifaces...); err != nil { - return - } - if pointers[1] != nil { - name = *pointers[1] - } - return -} diff --git a/vendor/github.com/jinzhu/gorm/docker-compose.yml b/vendor/github.com/jinzhu/gorm/docker-compose.yml deleted file mode 100644 index 79bf5fc..0000000 --- a/vendor/github.com/jinzhu/gorm/docker-compose.yml +++ /dev/null @@ -1,30 +0,0 @@ -version: '3' - -services: - mysql: - image: 'mysql:latest' - ports: - - 9910:3306 - environment: - - MYSQL_DATABASE=gorm - - MYSQL_USER=gorm - - MYSQL_PASSWORD=gorm - - MYSQL_RANDOM_ROOT_PASSWORD="yes" - postgres: - image: 'postgres:latest' - ports: - - 9920:5432 - environment: - - POSTGRES_USER=gorm - - POSTGRES_DB=gorm - - POSTGRES_PASSWORD=gorm - mssql: - image: 'mcmoe/mssqldocker:latest' - ports: - - 9930:1433 - environment: - - ACCEPT_EULA=Y - - SA_PASSWORD=LoremIpsum86 - - MSSQL_DB=gorm - - MSSQL_USER=gorm - - MSSQL_PASSWORD=LoremIpsum86 diff --git a/vendor/github.com/jinzhu/gorm/errors.go b/vendor/github.com/jinzhu/gorm/errors.go deleted file mode 100644 index d5ef8d5..0000000 --- a/vendor/github.com/jinzhu/gorm/errors.go +++ /dev/null @@ -1,72 +0,0 @@ -package gorm - -import ( - "errors" - "strings" -) - -var ( - // ErrRecordNotFound returns a "record not found error". 
Occurs only when attempting to query the database with a struct; querying with a slice won't return this error - ErrRecordNotFound = errors.New("record not found") - // ErrInvalidSQL occurs when you attempt a query with invalid SQL - ErrInvalidSQL = errors.New("invalid SQL") - // ErrInvalidTransaction occurs when you are trying to `Commit` or `Rollback` - ErrInvalidTransaction = errors.New("no valid transaction") - // ErrCantStartTransaction can't start transaction when you are trying to start one with `Begin` - ErrCantStartTransaction = errors.New("can't start transaction") - // ErrUnaddressable unaddressable value - ErrUnaddressable = errors.New("using unaddressable value") -) - -// Errors contains all happened errors -type Errors []error - -// IsRecordNotFoundError returns true if error contains a RecordNotFound error -func IsRecordNotFoundError(err error) bool { - if errs, ok := err.(Errors); ok { - for _, err := range errs { - if err == ErrRecordNotFound { - return true - } - } - } - return err == ErrRecordNotFound -} - -// GetErrors gets all errors that have occurred and returns a slice of errors (Error type) -func (errs Errors) GetErrors() []error { - return errs -} - -// Add adds an error to a given slice of errors -func (errs Errors) Add(newErrors ...error) Errors { - for _, err := range newErrors { - if err == nil { - continue - } - - if errors, ok := err.(Errors); ok { - errs = errs.Add(errors...) - } else { - ok = true - for _, e := range errs { - if err == e { - ok = false - } - } - if ok { - errs = append(errs, err) - } - } - } - return errs -} - -// Error takes a slice of all errors that have occurred and returns it as a formatted string -func (errs Errors) Error() string { - var errors = []string{} - for _, e := range errs { - errors = append(errors, e.Error()) - } - return strings.Join(errors, "; ") -} diff --git a/vendor/github.com/jinzhu/gorm/field.go b/vendor/github.com/jinzhu/gorm/field.go deleted file mode 100644 index acd06e2..0000000 --- a/vendor/github.com/jinzhu/gorm/field.go +++ /dev/null @@ -1,66 +0,0 @@ -package gorm - -import ( - "database/sql" - "database/sql/driver" - "errors" - "fmt" - "reflect" -) - -// Field model field definition -type Field struct { - *StructField - IsBlank bool - Field reflect.Value -} - -// Set set a value to the field -func (field *Field) Set(value interface{}) (err error) { - if !field.Field.IsValid() { - return errors.New("field value not valid") - } - - if !field.Field.CanAddr() { - return ErrUnaddressable - } - - reflectValue, ok := value.(reflect.Value) - if !ok { - reflectValue = reflect.ValueOf(value) - } - - fieldValue := field.Field - if reflectValue.IsValid() { - if reflectValue.Type().ConvertibleTo(fieldValue.Type()) { - fieldValue.Set(reflectValue.Convert(fieldValue.Type())) - } else { - if fieldValue.Kind() == reflect.Ptr { - if fieldValue.IsNil() { - fieldValue.Set(reflect.New(field.Struct.Type.Elem())) - } - fieldValue = fieldValue.Elem() - } - - if reflectValue.Type().ConvertibleTo(fieldValue.Type()) { - fieldValue.Set(reflectValue.Convert(fieldValue.Type())) - } else if scanner, ok := fieldValue.Addr().Interface().(sql.Scanner); ok { - v := reflectValue.Interface() - if valuer, ok := v.(driver.Valuer); ok { - if v, err = valuer.Value(); err == nil { - err = scanner.Scan(v) - } - } else { - err = scanner.Scan(v) - } - } else { - err = fmt.Errorf("could not convert argument of field %s from %s to %s", field.Name, reflectValue.Type(), fieldValue.Type()) - } - } - } else { - 
field.Field.Set(reflect.Zero(field.Field.Type())) - } - - field.IsBlank = isBlank(field.Field) - return err -} diff --git a/vendor/github.com/jinzhu/gorm/go.mod b/vendor/github.com/jinzhu/gorm/go.mod deleted file mode 100644 index 6e923b9..0000000 --- a/vendor/github.com/jinzhu/gorm/go.mod +++ /dev/null @@ -1,15 +0,0 @@ -module github.com/jinzhu/gorm - -go 1.12 - -require ( - github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd - github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 - github.com/go-sql-driver/mysql v1.4.1 - github.com/jinzhu/inflection v1.0.0 - github.com/jinzhu/now v1.0.1 - github.com/lib/pq v1.1.1 - github.com/mattn/go-sqlite3 v2.0.1+incompatible - golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd // indirect - google.golang.org/appengine v1.4.0 // indirect -) diff --git a/vendor/github.com/jinzhu/gorm/go.sum b/vendor/github.com/jinzhu/gorm/go.sum deleted file mode 100644 index 915b4c2..0000000 --- a/vendor/github.com/jinzhu/gorm/go.sum +++ /dev/null @@ -1,29 +0,0 @@ -github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd h1:83Wprp6ROGeiHFAP8WJdI2RoxALQYgdllERc3N5N2DM= -github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= -github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y= -github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= -github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= -github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= -github.com/jinzhu/now v1.0.1 h1:HjfetcXq097iXP0uoPCdnM4Efp5/9MsM0/M+XOTeR3M= -github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= -github.com/lib/pq v1.1.1 h1:sJZmqHoEaY7f+NPP8pgLB/WxulyR3fewgCM2qaSlBb4= -github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/mattn/go-sqlite3 v2.0.1+incompatible h1:xQ15muvnzGBHpIpdrNi1DA5x0+TcBZzsIDwmw9uTHzw= -github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c h1:Vj5n4GlwjmQteupaxJ9+0FNOmBrHfq7vN4btdGoDZgI= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd h1:GGJVjV8waZKRHrgwvtH66z9ZGVurTD1MT0n1Bb+q4aM= -golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= diff --git a/vendor/github.com/jinzhu/gorm/interface.go b/vendor/github.com/jinzhu/gorm/interface.go deleted file mode 100644 index fe64923..0000000 --- a/vendor/github.com/jinzhu/gorm/interface.go +++ /dev/null @@ -1,24 +0,0 @@ -package gorm - -import ( - "context" - "database/sql" -) - -// SQLCommon is the minimal database connection functionality gorm requires. Implemented by *sql.DB. -type SQLCommon interface { - Exec(query string, args ...interface{}) (sql.Result, error) - Prepare(query string) (*sql.Stmt, error) - Query(query string, args ...interface{}) (*sql.Rows, error) - QueryRow(query string, args ...interface{}) *sql.Row -} - -type sqlDb interface { - Begin() (*sql.Tx, error) - BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) -} - -type sqlTx interface { - Commit() error - Rollback() error -} diff --git a/vendor/github.com/jinzhu/gorm/join_table_handler.go b/vendor/github.com/jinzhu/gorm/join_table_handler.go deleted file mode 100644 index a036d46..0000000 --- a/vendor/github.com/jinzhu/gorm/join_table_handler.go +++ /dev/null @@ -1,211 +0,0 @@ -package gorm - -import ( - "errors" - "fmt" - "reflect" - "strings" -) - -// JoinTableHandlerInterface is an interface for how to handle many2many relations -type JoinTableHandlerInterface interface { - // initialize join table handler - Setup(relationship *Relationship, tableName string, source reflect.Type, destination reflect.Type) - // Table return join table's table name - Table(db *DB) string - // Add create relationship in join table for source and destination - Add(handler JoinTableHandlerInterface, db *DB, source interface{}, destination interface{}) error - // Delete delete relationship in join table for sources - Delete(handler JoinTableHandlerInterface, db *DB, sources ...interface{}) error - // JoinWith query with `Join` conditions - JoinWith(handler JoinTableHandlerInterface, db *DB, source interface{}) *DB - // SourceForeignKeys return source foreign keys - SourceForeignKeys() []JoinTableForeignKey - // DestinationForeignKeys return destination foreign keys - DestinationForeignKeys() []JoinTableForeignKey -} - -// JoinTableForeignKey join table foreign key struct -type JoinTableForeignKey struct { - DBName string - AssociationDBName string -} - -// JoinTableSource is a struct that contains model type and foreign keys -type JoinTableSource struct { - ModelType reflect.Type - ForeignKeys []JoinTableForeignKey -} - -// JoinTableHandler default join table handler -type JoinTableHandler struct { - TableName string `sql:"-"` - Source JoinTableSource `sql:"-"` - Destination JoinTableSource `sql:"-"` -} - -// SourceForeignKeys return source foreign keys -func (s *JoinTableHandler) SourceForeignKeys() []JoinTableForeignKey { - return s.Source.ForeignKeys -} - -// DestinationForeignKeys return destination foreign keys -func (s *JoinTableHandler) DestinationForeignKeys() []JoinTableForeignKey { - return s.Destination.ForeignKeys -} - -// Setup initialize a default join table handler -func (s *JoinTableHandler) Setup(relationship *Relationship, tableName string, source reflect.Type, destination reflect.Type) { - 
s.TableName = tableName - - s.Source = JoinTableSource{ModelType: source} - s.Source.ForeignKeys = []JoinTableForeignKey{} - for idx, dbName := range relationship.ForeignFieldNames { - s.Source.ForeignKeys = append(s.Source.ForeignKeys, JoinTableForeignKey{ - DBName: relationship.ForeignDBNames[idx], - AssociationDBName: dbName, - }) - } - - s.Destination = JoinTableSource{ModelType: destination} - s.Destination.ForeignKeys = []JoinTableForeignKey{} - for idx, dbName := range relationship.AssociationForeignFieldNames { - s.Destination.ForeignKeys = append(s.Destination.ForeignKeys, JoinTableForeignKey{ - DBName: relationship.AssociationForeignDBNames[idx], - AssociationDBName: dbName, - }) - } -} - -// Table return join table's table name -func (s JoinTableHandler) Table(db *DB) string { - return DefaultTableNameHandler(db, s.TableName) -} - -func (s JoinTableHandler) updateConditionMap(conditionMap map[string]interface{}, db *DB, joinTableSources []JoinTableSource, sources ...interface{}) { - for _, source := range sources { - scope := db.NewScope(source) - modelType := scope.GetModelStruct().ModelType - - for _, joinTableSource := range joinTableSources { - if joinTableSource.ModelType == modelType { - for _, foreignKey := range joinTableSource.ForeignKeys { - if field, ok := scope.FieldByName(foreignKey.AssociationDBName); ok { - conditionMap[foreignKey.DBName] = field.Field.Interface() - } - } - break - } - } - } -} - -// Add create relationship in join table for source and destination -func (s JoinTableHandler) Add(handler JoinTableHandlerInterface, db *DB, source interface{}, destination interface{}) error { - var ( - scope = db.NewScope("") - conditionMap = map[string]interface{}{} - ) - - // Update condition map for source - s.updateConditionMap(conditionMap, db, []JoinTableSource{s.Source}, source) - - // Update condition map for destination - s.updateConditionMap(conditionMap, db, []JoinTableSource{s.Destination}, destination) - - var assignColumns, binVars, conditions []string - var values []interface{} - for key, value := range conditionMap { - assignColumns = append(assignColumns, scope.Quote(key)) - binVars = append(binVars, `?`) - conditions = append(conditions, fmt.Sprintf("%v = ?", scope.Quote(key))) - values = append(values, value) - } - - for _, value := range values { - values = append(values, value) - } - - quotedTable := scope.Quote(handler.Table(db)) - sql := fmt.Sprintf( - "INSERT INTO %v (%v) SELECT %v %v WHERE NOT EXISTS (SELECT * FROM %v WHERE %v)", - quotedTable, - strings.Join(assignColumns, ","), - strings.Join(binVars, ","), - scope.Dialect().SelectFromDummyTable(), - quotedTable, - strings.Join(conditions, " AND "), - ) - - return db.Exec(sql, values...).Error -} - -// Delete delete relationship in join table for sources -func (s JoinTableHandler) Delete(handler JoinTableHandlerInterface, db *DB, sources ...interface{}) error { - var ( - scope = db.NewScope(nil) - conditions []string - values []interface{} - conditionMap = map[string]interface{}{} - ) - - s.updateConditionMap(conditionMap, db, []JoinTableSource{s.Source, s.Destination}, sources...) 
- - for key, value := range conditionMap { - conditions = append(conditions, fmt.Sprintf("%v = ?", scope.Quote(key))) - values = append(values, value) - } - - return db.Table(handler.Table(db)).Where(strings.Join(conditions, " AND "), values...).Delete("").Error -} - -// JoinWith query with `Join` conditions -func (s JoinTableHandler) JoinWith(handler JoinTableHandlerInterface, db *DB, source interface{}) *DB { - var ( - scope = db.NewScope(source) - tableName = handler.Table(db) - quotedTableName = scope.Quote(tableName) - joinConditions []string - values []interface{} - ) - - if s.Source.ModelType == scope.GetModelStruct().ModelType { - destinationTableName := db.NewScope(reflect.New(s.Destination.ModelType).Interface()).QuotedTableName() - for _, foreignKey := range s.Destination.ForeignKeys { - joinConditions = append(joinConditions, fmt.Sprintf("%v.%v = %v.%v", quotedTableName, scope.Quote(foreignKey.DBName), destinationTableName, scope.Quote(foreignKey.AssociationDBName))) - } - - var foreignDBNames []string - var foreignFieldNames []string - - for _, foreignKey := range s.Source.ForeignKeys { - foreignDBNames = append(foreignDBNames, foreignKey.DBName) - if field, ok := scope.FieldByName(foreignKey.AssociationDBName); ok { - foreignFieldNames = append(foreignFieldNames, field.Name) - } - } - - foreignFieldValues := scope.getColumnAsArray(foreignFieldNames, scope.Value) - - var condString string - if len(foreignFieldValues) > 0 { - var quotedForeignDBNames []string - for _, dbName := range foreignDBNames { - quotedForeignDBNames = append(quotedForeignDBNames, tableName+"."+dbName) - } - - condString = fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, quotedForeignDBNames), toQueryMarks(foreignFieldValues)) - - keys := scope.getColumnAsArray(foreignFieldNames, scope.Value) - values = append(values, toQueryValues(keys)) - } else { - condString = fmt.Sprintf("1 <> 1") - } - - return db.Joins(fmt.Sprintf("INNER JOIN %v ON %v", quotedTableName, strings.Join(joinConditions, " AND "))). - Where(condString, toQueryValues(foreignFieldValues)...) 
- } - - db.Error = errors.New("wrong source type for join table handler") - return db -} diff --git a/vendor/github.com/jinzhu/gorm/logger.go b/vendor/github.com/jinzhu/gorm/logger.go deleted file mode 100644 index 88e167d..0000000 --- a/vendor/github.com/jinzhu/gorm/logger.go +++ /dev/null @@ -1,141 +0,0 @@ -package gorm - -import ( - "database/sql/driver" - "fmt" - "log" - "os" - "reflect" - "regexp" - "strconv" - "time" - "unicode" -) - -var ( - defaultLogger = Logger{log.New(os.Stdout, "\r\n", 0)} - sqlRegexp = regexp.MustCompile(`\?`) - numericPlaceHolderRegexp = regexp.MustCompile(`\$\d+`) -) - -func isPrintable(s string) bool { - for _, r := range s { - if !unicode.IsPrint(r) { - return false - } - } - return true -} - -var LogFormatter = func(values ...interface{}) (messages []interface{}) { - if len(values) > 1 { - var ( - sql string - formattedValues []string - level = values[0] - currentTime = "\n\033[33m[" + NowFunc().Format("2006-01-02 15:04:05") + "]\033[0m" - source = fmt.Sprintf("\033[35m(%v)\033[0m", values[1]) - ) - - messages = []interface{}{source, currentTime} - - if len(values) == 2 { - //remove the line break - currentTime = currentTime[1:] - //remove the brackets - source = fmt.Sprintf("\033[35m%v\033[0m", values[1]) - - messages = []interface{}{currentTime, source} - } - - if level == "sql" { - // duration - messages = append(messages, fmt.Sprintf(" \033[36;1m[%.2fms]\033[0m ", float64(values[2].(time.Duration).Nanoseconds()/1e4)/100.0)) - // sql - - for _, value := range values[4].([]interface{}) { - indirectValue := reflect.Indirect(reflect.ValueOf(value)) - if indirectValue.IsValid() { - value = indirectValue.Interface() - if t, ok := value.(time.Time); ok { - if t.IsZero() { - formattedValues = append(formattedValues, fmt.Sprintf("'%v'", "0000-00-00 00:00:00")) - } else { - formattedValues = append(formattedValues, fmt.Sprintf("'%v'", t.Format("2006-01-02 15:04:05"))) - } - } else if b, ok := value.([]byte); ok { - if str := string(b); isPrintable(str) { - formattedValues = append(formattedValues, fmt.Sprintf("'%v'", str)) - } else { - formattedValues = append(formattedValues, "''") - } - } else if r, ok := value.(driver.Valuer); ok { - if value, err := r.Value(); err == nil && value != nil { - formattedValues = append(formattedValues, fmt.Sprintf("'%v'", value)) - } else { - formattedValues = append(formattedValues, "NULL") - } - } else { - switch value.(type) { - case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool: - formattedValues = append(formattedValues, fmt.Sprintf("%v", value)) - default: - formattedValues = append(formattedValues, fmt.Sprintf("'%v'", value)) - } - } - } else { - formattedValues = append(formattedValues, "NULL") - } - } - - // differentiate between $n placeholders or else treat like ? 
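// Numeric ($n) placeholders are substituted by regexp below so each value
// lands in its numbered slot; plain ? placeholders are handled by splitting
// the SQL on sqlRegexp and interleaving the formatted values in order.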
- if numericPlaceHolderRegexp.MatchString(values[3].(string)) { - sql = values[3].(string) - for index, value := range formattedValues { - placeholder := fmt.Sprintf(`\$%d([^\d]|$)`, index+1) - sql = regexp.MustCompile(placeholder).ReplaceAllString(sql, value+"$1") - } - } else { - formattedValuesLength := len(formattedValues) - for index, value := range sqlRegexp.Split(values[3].(string), -1) { - sql += value - if index < formattedValuesLength { - sql += formattedValues[index] - } - } - } - - messages = append(messages, sql) - messages = append(messages, fmt.Sprintf(" \n\033[36;31m[%v]\033[0m ", strconv.FormatInt(values[5].(int64), 10)+" rows affected or returned ")) - } else { - messages = append(messages, "\033[31;1m") - messages = append(messages, values[2:]...) - messages = append(messages, "\033[0m") - } - } - - return -} - -type logger interface { - Print(v ...interface{}) -} - -// LogWriter log writer interface -type LogWriter interface { - Println(v ...interface{}) -} - -// Logger default logger -type Logger struct { - LogWriter -} - -// Print format & print log -func (logger Logger) Print(values ...interface{}) { - logger.Println(LogFormatter(values...)...) -} - -type nopLogger struct{} - -func (nopLogger) Print(values ...interface{}) {} diff --git a/vendor/github.com/jinzhu/gorm/main.go b/vendor/github.com/jinzhu/gorm/main.go deleted file mode 100644 index 3db8787..0000000 --- a/vendor/github.com/jinzhu/gorm/main.go +++ /dev/null @@ -1,881 +0,0 @@ -package gorm - -import ( - "context" - "database/sql" - "errors" - "fmt" - "reflect" - "strings" - "sync" - "time" -) - -// DB contains information for current db connection -type DB struct { - sync.RWMutex - Value interface{} - Error error - RowsAffected int64 - - // single db - db SQLCommon - blockGlobalUpdate bool - logMode logModeValue - logger logger - search *search - values sync.Map - - // global db - parent *DB - callbacks *Callback - dialect Dialect - singularTable bool - - // function to be used to override the creating of a new timestamp - nowFuncOverride func() time.Time -} - -type logModeValue int - -const ( - defaultLogMode logModeValue = iota - noLogMode - detailedLogMode -) - -// Open initialize a new db connection, need to import driver first, e.g: -// -// import _ "github.com/go-sql-driver/mysql" -// func main() { -// db, err := gorm.Open("mysql", "user:password@/dbname?charset=utf8&parseTime=True&loc=Local") -// } -// GORM has wrapped some drivers, for easier to remember driver's import path, so you could import the mysql driver with -// import _ "github.com/jinzhu/gorm/dialects/mysql" -// // import _ "github.com/jinzhu/gorm/dialects/postgres" -// // import _ "github.com/jinzhu/gorm/dialects/sqlite" -// // import _ "github.com/jinzhu/gorm/dialects/mssql" -func Open(dialect string, args ...interface{}) (db *DB, err error) { - if len(args) == 0 { - err = errors.New("invalid database source") - return nil, err - } - var source string - var dbSQL SQLCommon - var ownDbSQL bool - - switch value := args[0].(type) { - case string: - var driver = dialect - if len(args) == 1 { - source = value - } else if len(args) >= 2 { - driver = value - source = args[1].(string) - } - dbSQL, err = sql.Open(driver, source) - ownDbSQL = true - case SQLCommon: - dbSQL = value - ownDbSQL = false - default: - return nil, fmt.Errorf("invalid database source: %v is not a valid type", value) - } - - db = &DB{ - db: dbSQL, - logger: defaultLogger, - callbacks: DefaultCallback, - dialect: newDialect(dialect, dbSQL), - } - db.parent = db - if err != 
nil { - return - } - // Send a ping to make sure the database connection is alive. - if d, ok := dbSQL.(*sql.DB); ok { - if err = d.Ping(); err != nil && ownDbSQL { - d.Close() - } - } - return -} - -// New clone a new db connection without search conditions -func (s *DB) New() *DB { - clone := s.clone() - clone.search = nil - clone.Value = nil - return clone -} - -type closer interface { - Close() error -} - -// Close close current db connection. If database connection is not an io.Closer, returns an error. -func (s *DB) Close() error { - if db, ok := s.parent.db.(closer); ok { - return db.Close() - } - return errors.New("can't close current db") -} - -// DB get `*sql.DB` from current connection -// If the underlying database connection is not a *sql.DB, returns nil -func (s *DB) DB() *sql.DB { - db, ok := s.db.(*sql.DB) - if !ok { - panic("can't support full GORM on currently status, maybe this is a TX instance.") - } - return db -} - -// CommonDB return the underlying `*sql.DB` or `*sql.Tx` instance, mainly intended to allow coexistence with legacy non-GORM code. -func (s *DB) CommonDB() SQLCommon { - return s.db -} - -// Dialect get dialect -func (s *DB) Dialect() Dialect { - return s.dialect -} - -// Callback return `Callbacks` container, you could add/change/delete callbacks with it -// db.Callback().Create().Register("update_created_at", updateCreated) -// Refer https://jinzhu.github.io/gorm/development.html#callbacks -func (s *DB) Callback() *Callback { - s.parent.callbacks = s.parent.callbacks.clone(s.logger) - return s.parent.callbacks -} - -// SetLogger replace default logger -func (s *DB) SetLogger(log logger) { - s.logger = log -} - -// LogMode set log mode, `true` for detailed logs, `false` for no log, default, will only print error logs -func (s *DB) LogMode(enable bool) *DB { - if enable { - s.logMode = detailedLogMode - } else { - s.logMode = noLogMode - } - return s -} - -// SetNowFuncOverride set the function to be used when creating a new timestamp -func (s *DB) SetNowFuncOverride(nowFuncOverride func() time.Time) *DB { - s.nowFuncOverride = nowFuncOverride - return s -} - -// Get a new timestamp, using the provided nowFuncOverride on the DB instance if set, -// otherwise defaults to the global NowFunc() -func (s *DB) nowFunc() time.Time { - if s.nowFuncOverride != nil { - return s.nowFuncOverride() - } - - return NowFunc() -} - -// BlockGlobalUpdate if true, generates an error on update/delete without where clause. -// This is to prevent eventual error with empty objects updates/deletions -func (s *DB) BlockGlobalUpdate(enable bool) *DB { - s.blockGlobalUpdate = enable - return s -} - -// HasBlockGlobalUpdate return state of block -func (s *DB) HasBlockGlobalUpdate() bool { - return s.blockGlobalUpdate -} - -// SingularTable use singular table by default -func (s *DB) SingularTable(enable bool) { - s.parent.Lock() - defer s.parent.Unlock() - s.parent.singularTable = enable -} - -// NewScope create a scope for current operation -func (s *DB) NewScope(value interface{}) *Scope { - dbClone := s.clone() - dbClone.Value = value - scope := &Scope{db: dbClone, Value: value} - if s.search != nil { - scope.Search = s.search.clone() - } else { - scope.Search = &search{} - } - return scope -} - -// QueryExpr returns the query as SqlExpr object -func (s *DB) QueryExpr() *SqlExpr { - scope := s.NewScope(s.Value) - scope.InstanceSet("skip_bindvar", true) - scope.prepareQuerySQL() - - return Expr(scope.SQL, scope.SQLVars...) 
-} - -// SubQuery returns the query as sub query -func (s *DB) SubQuery() *SqlExpr { - scope := s.NewScope(s.Value) - scope.InstanceSet("skip_bindvar", true) - scope.prepareQuerySQL() - - return Expr(fmt.Sprintf("(%v)", scope.SQL), scope.SQLVars...) -} - -// Where return a new relation, filter records with given conditions, accepts `map`, `struct` or `string` as conditions, refer http://jinzhu.github.io/gorm/crud.html#query -func (s *DB) Where(query interface{}, args ...interface{}) *DB { - return s.clone().search.Where(query, args...).db -} - -// Or filter records that match before conditions or this one, similar to `Where` -func (s *DB) Or(query interface{}, args ...interface{}) *DB { - return s.clone().search.Or(query, args...).db -} - -// Not filter records that don't match current conditions, similar to `Where` -func (s *DB) Not(query interface{}, args ...interface{}) *DB { - return s.clone().search.Not(query, args...).db -} - -// Limit specify the number of records to be retrieved -func (s *DB) Limit(limit interface{}) *DB { - return s.clone().search.Limit(limit).db -} - -// Offset specify the number of records to skip before starting to return the records -func (s *DB) Offset(offset interface{}) *DB { - return s.clone().search.Offset(offset).db -} - -// Order specify order when retrieve records from database, set reorder to `true` to overwrite defined conditions -// db.Order("name DESC") -// db.Order("name DESC", true) // reorder -// db.Order(gorm.Expr("name = ? DESC", "first")) // sql expression -func (s *DB) Order(value interface{}, reorder ...bool) *DB { - return s.clone().search.Order(value, reorder...).db -} - -// Select specify fields that you want to retrieve from database when querying, by default, will select all fields; -// When creating/updating, specify fields that you want to save to database -func (s *DB) Select(query interface{}, args ...interface{}) *DB { - return s.clone().search.Select(query, args...).db -} - -// Omit specify fields that you want to ignore when saving to database for creating, updating -func (s *DB) Omit(columns ...string) *DB { - return s.clone().search.Omit(columns...).db -} - -// Group specify the group method on the find -func (s *DB) Group(query string) *DB { - return s.clone().search.Group(query).db -} - -// Having specify HAVING conditions for GROUP BY -func (s *DB) Having(query interface{}, values ...interface{}) *DB { - return s.clone().search.Having(query, values...).db -} - -// Joins specify Joins conditions -// db.Joins("JOIN emails ON emails.user_id = users.id AND emails.email = ?", "jinzhu@example.org").Find(&user) -func (s *DB) Joins(query string, args ...interface{}) *DB { - return s.clone().search.Joins(query, args...).db -} - -// Scopes pass current database connection to arguments `func(*DB) *DB`, which could be used to add conditions dynamically -// func AmountGreaterThan1000(db *gorm.DB) *gorm.DB { -// return db.Where("amount > ?", 1000) -// } -// -// func OrderStatus(status []string) func (db *gorm.DB) *gorm.DB { -// return func (db *gorm.DB) *gorm.DB { -// return db.Scopes(AmountGreaterThan1000).Where("status in (?)", status) -// } -// } -// -// db.Scopes(AmountGreaterThan1000, OrderStatus([]string{"paid", "shipped"})).Find(&orders) -// Refer https://jinzhu.github.io/gorm/crud.html#scopes -func (s *DB) Scopes(funcs ...func(*DB) *DB) *DB { - for _, f := range funcs { - s = f(s) - } - return s -} - -// Unscoped return all record including deleted record, refer Soft Delete https://jinzhu.github.io/gorm/crud.html#soft-delete 
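// e.g. db.Unscoped().Find(&users) also returns soft-deleted rows.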
-func (s *DB) Unscoped() *DB { - return s.clone().search.unscoped().db -} - -// Attrs initialize struct with argument if record not found with `FirstOrInit` https://jinzhu.github.io/gorm/crud.html#firstorinit or `FirstOrCreate` https://jinzhu.github.io/gorm/crud.html#firstorcreate -func (s *DB) Attrs(attrs ...interface{}) *DB { - return s.clone().search.Attrs(attrs...).db -} - -// Assign assign result with argument regardless it is found or not with `FirstOrInit` https://jinzhu.github.io/gorm/crud.html#firstorinit or `FirstOrCreate` https://jinzhu.github.io/gorm/crud.html#firstorcreate -func (s *DB) Assign(attrs ...interface{}) *DB { - return s.clone().search.Assign(attrs...).db -} - -// First find first record that match given conditions, order by primary key -func (s *DB) First(out interface{}, where ...interface{}) *DB { - newScope := s.NewScope(out) - newScope.Search.Limit(1) - - return newScope.Set("gorm:order_by_primary_key", "ASC"). - inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db -} - -// Take return a record that match given conditions, the order will depend on the database implementation -func (s *DB) Take(out interface{}, where ...interface{}) *DB { - newScope := s.NewScope(out) - newScope.Search.Limit(1) - return newScope.inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db -} - -// Last find last record that match given conditions, order by primary key -func (s *DB) Last(out interface{}, where ...interface{}) *DB { - newScope := s.NewScope(out) - newScope.Search.Limit(1) - return newScope.Set("gorm:order_by_primary_key", "DESC"). - inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db -} - -// Find find records that match given conditions -func (s *DB) Find(out interface{}, where ...interface{}) *DB { - return s.NewScope(out).inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db -} - -//Preloads preloads relations, don`t touch out -func (s *DB) Preloads(out interface{}) *DB { - return s.NewScope(out).InstanceSet("gorm:only_preload", 1).callCallbacks(s.parent.callbacks.queries).db -} - -// Scan scan value to a struct -func (s *DB) Scan(dest interface{}) *DB { - return s.NewScope(s.Value).Set("gorm:query_destination", dest).callCallbacks(s.parent.callbacks.queries).db -} - -// Row return `*sql.Row` with given conditions -func (s *DB) Row() *sql.Row { - return s.NewScope(s.Value).row() -} - -// Rows return `*sql.Rows` with given conditions -func (s *DB) Rows() (*sql.Rows, error) { - return s.NewScope(s.Value).rows() -} - -// ScanRows scan `*sql.Rows` to give struct -func (s *DB) ScanRows(rows *sql.Rows, result interface{}) error { - var ( - scope = s.NewScope(result) - clone = scope.db - columns, err = rows.Columns() - ) - - if clone.AddError(err) == nil { - scope.scan(rows, columns, scope.Fields()) - } - - return clone.Error -} - -// Pluck used to query single column from a model as a map -// var ages []int64 -// db.Find(&users).Pluck("age", &ages) -func (s *DB) Pluck(column string, value interface{}) *DB { - return s.NewScope(s.Value).pluck(column, value).db -} - -// Count get how many records for a model -func (s *DB) Count(value interface{}) *DB { - return s.NewScope(s.Value).count(value).db -} - -// Related get related associations -func (s *DB) Related(value interface{}, foreignKeys ...string) *DB { - return s.NewScope(s.Value).related(value, foreignKeys...).db -} - -// FirstOrInit find first matched record or initialize a new one with given conditions (only works with struct, map conditions) -// 
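// A minimal sketch (not from this file) of manual iteration with the Rows
// and ScanRows methods above, for result sets too large for a single Find;
// assumes the hypothetical User model and "log" imported:
rows, err := db.Model(&User{}).Where("age > ?", 18).Rows()
if err != nil {
	log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
	var u User
	if err := db.ScanRows(rows, &u); err != nil {
		log.Fatal(err)
	}
	// ... use u ...
}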
https://jinzhu.github.io/gorm/crud.html#firstorinit -func (s *DB) FirstOrInit(out interface{}, where ...interface{}) *DB { - c := s.clone() - if result := c.First(out, where...); result.Error != nil { - if !result.RecordNotFound() { - return result - } - c.NewScope(out).inlineCondition(where...).initialize() - } else { - c.NewScope(out).updatedAttrsWithValues(c.search.assignAttrs) - } - return c -} - -// FirstOrCreate find first matched record or create a new one with given conditions (only works with struct, map conditions) -// https://jinzhu.github.io/gorm/crud.html#firstorcreate -func (s *DB) FirstOrCreate(out interface{}, where ...interface{}) *DB { - c := s.clone() - if result := s.First(out, where...); result.Error != nil { - if !result.RecordNotFound() { - return result - } - return c.NewScope(out).inlineCondition(where...).initialize().callCallbacks(c.parent.callbacks.creates).db - } else if len(c.search.assignAttrs) > 0 { - return c.NewScope(out).InstanceSet("gorm:update_interface", c.search.assignAttrs).callCallbacks(c.parent.callbacks.updates).db - } - return c -} - -// Update update attributes with callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update -// WARNING when update with struct, GORM will not update fields that with zero value -func (s *DB) Update(attrs ...interface{}) *DB { - return s.Updates(toSearchableMap(attrs...), true) -} - -// Updates update attributes with callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update -func (s *DB) Updates(values interface{}, ignoreProtectedAttrs ...bool) *DB { - return s.NewScope(s.Value). - Set("gorm:ignore_protected_attrs", len(ignoreProtectedAttrs) > 0). - InstanceSet("gorm:update_interface", values). - callCallbacks(s.parent.callbacks.updates).db -} - -// UpdateColumn update attributes without callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update -func (s *DB) UpdateColumn(attrs ...interface{}) *DB { - return s.UpdateColumns(toSearchableMap(attrs...)) -} - -// UpdateColumns update attributes without callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update -func (s *DB) UpdateColumns(values interface{}) *DB { - return s.NewScope(s.Value). - Set("gorm:update_column", true). - Set("gorm:save_associations", false). - InstanceSet("gorm:update_interface", values). 
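// A minimal sketch (not from this file) of FirstOrCreate plus the Update
// WARNING above: struct-based updates skip zero-valued fields, so writing
// an explicit zero takes a map. User is the hypothetical model from before:
var u User
db.Where(User{Name: "jinzhu"}).FirstOrCreate(&u)

db.Model(&u).Updates(User{Age: 0})                     // zero value: field is skipped
db.Model(&u).Updates(map[string]interface{}{"age": 0}) // map: age really becomes 0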
- callCallbacks(s.parent.callbacks.updates).db -} - -// Save update value in database, if the value doesn't have primary key, will insert it -func (s *DB) Save(value interface{}) *DB { - scope := s.NewScope(value) - if !scope.PrimaryKeyZero() { - newDB := scope.callCallbacks(s.parent.callbacks.updates).db - if newDB.Error == nil && newDB.RowsAffected == 0 { - return s.New().Table(scope.TableName()).FirstOrCreate(value) - } - return newDB - } - return scope.callCallbacks(s.parent.callbacks.creates).db -} - -// Create insert the value into database -func (s *DB) Create(value interface{}) *DB { - scope := s.NewScope(value) - return scope.callCallbacks(s.parent.callbacks.creates).db -} - -// Delete delete value match given conditions, if the value has primary key, then will including the primary key as condition -// WARNING If model has DeletedAt field, GORM will only set field DeletedAt's value to current time -func (s *DB) Delete(value interface{}, where ...interface{}) *DB { - return s.NewScope(value).inlineCondition(where...).callCallbacks(s.parent.callbacks.deletes).db -} - -// Raw use raw sql as conditions, won't run it unless invoked by other methods -// db.Raw("SELECT name, age FROM users WHERE name = ?", 3).Scan(&result) -func (s *DB) Raw(sql string, values ...interface{}) *DB { - return s.clone().search.Raw(true).Where(sql, values...).db -} - -// Exec execute raw sql -func (s *DB) Exec(sql string, values ...interface{}) *DB { - scope := s.NewScope(nil) - generatedSQL := scope.buildCondition(map[string]interface{}{"query": sql, "args": values}, true) - generatedSQL = strings.TrimSuffix(strings.TrimPrefix(generatedSQL, "("), ")") - scope.Raw(generatedSQL) - return scope.Exec().db -} - -// Model specify the model you would like to run db operations -// // update all users's name to `hello` -// db.Model(&User{}).Update("name", "hello") -// // if user's primary key is non-blank, will use it as condition, then will only update the user's name to `hello` -// db.Model(&user).Update("name", "hello") -func (s *DB) Model(value interface{}) *DB { - c := s.clone() - c.Value = value - return c -} - -// Table specify the table you would like to run db operations -func (s *DB) Table(name string) *DB { - clone := s.clone() - clone.search.Table(name) - clone.Value = nil - return clone -} - -// Debug start debug mode -func (s *DB) Debug() *DB { - return s.clone().LogMode(true) -} - -// Transaction start a transaction as a block, -// return error will rollback, otherwise to commit. 
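// A minimal sketch (not from this file) contrasting Raw and Exec as their
// doc comments above describe: Raw builds a query that only runs once Scan
// (or Row/Rows) is called, while Exec runs immediately:
var result struct {
	Name string
	Age  int
}
db.Raw("SELECT name, age FROM users WHERE name = ?", "jinzhu").Scan(&result)
db.Exec("UPDATE users SET age = ? WHERE name = ?", 30, "jinzhu")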
-func (s *DB) Transaction(fc func(tx *DB) error) (err error) { - panicked := true - tx := s.Begin() - defer func() { - // Make sure to rollback when panic, Block error or Commit error - if panicked || err != nil { - tx.Rollback() - } - }() - - err = fc(tx) - - if err == nil { - err = tx.Commit().Error - } - - panicked = false - return -} - -// Begin begins a transaction -func (s *DB) Begin() *DB { - return s.BeginTx(context.Background(), &sql.TxOptions{}) -} - -// BeginTx begins a transaction with options -func (s *DB) BeginTx(ctx context.Context, opts *sql.TxOptions) *DB { - c := s.clone() - if db, ok := c.db.(sqlDb); ok && db != nil { - tx, err := db.BeginTx(ctx, opts) - c.db = interface{}(tx).(SQLCommon) - - c.dialect.SetDB(c.db) - c.AddError(err) - } else { - c.AddError(ErrCantStartTransaction) - } - return c -} - -// Commit commit a transaction -func (s *DB) Commit() *DB { - var emptySQLTx *sql.Tx - if db, ok := s.db.(sqlTx); ok && db != nil && db != emptySQLTx { - s.AddError(db.Commit()) - } else { - s.AddError(ErrInvalidTransaction) - } - return s -} - -// Rollback rollback a transaction -func (s *DB) Rollback() *DB { - var emptySQLTx *sql.Tx - if db, ok := s.db.(sqlTx); ok && db != nil && db != emptySQLTx { - if err := db.Rollback(); err != nil && err != sql.ErrTxDone { - s.AddError(err) - } - } else { - s.AddError(ErrInvalidTransaction) - } - return s -} - -// RollbackUnlessCommitted rollback a transaction if it has not yet been -// committed. -func (s *DB) RollbackUnlessCommitted() *DB { - var emptySQLTx *sql.Tx - if db, ok := s.db.(sqlTx); ok && db != nil && db != emptySQLTx { - err := db.Rollback() - // Ignore the error indicating that the transaction has already - // been committed. - if err != sql.ErrTxDone { - s.AddError(err) - } - } else { - s.AddError(ErrInvalidTransaction) - } - return s -} - -// NewRecord check if value's primary key is blank -func (s *DB) NewRecord(value interface{}) bool { - return s.NewScope(value).PrimaryKeyZero() -} - -// RecordNotFound check if returning ErrRecordNotFound error -func (s *DB) RecordNotFound() bool { - for _, err := range s.GetErrors() { - if err == ErrRecordNotFound { - return true - } - } - return false -} - -// CreateTable create table for models -func (s *DB) CreateTable(models ...interface{}) *DB { - db := s.Unscoped() - for _, model := range models { - db = db.NewScope(model).createTable().db - } - return db -} - -// DropTable drop table for models -func (s *DB) DropTable(values ...interface{}) *DB { - db := s.clone() - for _, value := range values { - if tableName, ok := value.(string); ok { - db = db.Table(tableName) - } - - db = db.NewScope(value).dropTable().db - } - return db -} - -// DropTableIfExists drop table if it is exist -func (s *DB) DropTableIfExists(values ...interface{}) *DB { - db := s.clone() - for _, value := range values { - if s.HasTable(value) { - db.AddError(s.DropTable(value).Error) - } - } - return db -} - -// HasTable check has table or not -func (s *DB) HasTable(value interface{}) bool { - var ( - scope = s.NewScope(value) - tableName string - ) - - if name, ok := value.(string); ok { - tableName = name - } else { - tableName = scope.TableName() - } - - has := scope.Dialect().HasTable(tableName) - s.AddError(scope.db.Error) - return has -} - -// AutoMigrate run auto migration for given models, will only add missing fields, won't delete/change current data -func (s *DB) AutoMigrate(values ...interface{}) *DB { - db := s.Unscoped() - for _, value := range values { - db = 
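// A minimal sketch (not from this file) of both transaction styles defined
// above: the Transaction closure commits on a nil return and rolls back on
// error or panic, while the manual form pairs Begin with
// RollbackUnlessCommitted. Assumes "log" imported:
if err := db.Transaction(func(tx *gorm.DB) error {
	if err := tx.Create(&User{Name: "jinzhu"}).Error; err != nil {
		return err // rolled back
	}
	return nil // committed
}); err != nil {
	log.Println("transaction failed:", err)
}

tx := db.Begin()
defer tx.RollbackUnlessCommitted() // no-op once Commit has succeeded
tx.Create(&User{Name: "jinzhu"})
tx.Commit()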
db.NewScope(value).autoMigrate().db - } - return db -} - -// ModifyColumn modify column to type -func (s *DB) ModifyColumn(column string, typ string) *DB { - scope := s.NewScope(s.Value) - scope.modifyColumn(column, typ) - return scope.db -} - -// DropColumn drop a column -func (s *DB) DropColumn(column string) *DB { - scope := s.NewScope(s.Value) - scope.dropColumn(column) - return scope.db -} - -// AddIndex add index for columns with given name -func (s *DB) AddIndex(indexName string, columns ...string) *DB { - scope := s.Unscoped().NewScope(s.Value) - scope.addIndex(false, indexName, columns...) - return scope.db -} - -// AddUniqueIndex add unique index for columns with given name -func (s *DB) AddUniqueIndex(indexName string, columns ...string) *DB { - scope := s.Unscoped().NewScope(s.Value) - scope.addIndex(true, indexName, columns...) - return scope.db -} - -// RemoveIndex remove index with name -func (s *DB) RemoveIndex(indexName string) *DB { - scope := s.NewScope(s.Value) - scope.removeIndex(indexName) - return scope.db -} - -// AddForeignKey Add foreign key to the given scope, e.g: -// db.Model(&User{}).AddForeignKey("city_id", "cities(id)", "RESTRICT", "RESTRICT") -func (s *DB) AddForeignKey(field string, dest string, onDelete string, onUpdate string) *DB { - scope := s.NewScope(s.Value) - scope.addForeignKey(field, dest, onDelete, onUpdate) - return scope.db -} - -// RemoveForeignKey Remove foreign key from the given scope, e.g: -// db.Model(&User{}).RemoveForeignKey("city_id", "cities(id)") -func (s *DB) RemoveForeignKey(field string, dest string) *DB { - scope := s.clone().NewScope(s.Value) - scope.removeForeignKey(field, dest) - return scope.db -} - -// Association start `Association Mode` to handler relations things easir in that mode, refer: https://jinzhu.github.io/gorm/associations.html#association-mode -func (s *DB) Association(column string) *Association { - var err error - var scope = s.Set("gorm:association:source", s.Value).NewScope(s.Value) - - if primaryField := scope.PrimaryField(); primaryField.IsBlank { - err = errors.New("primary key can't be nil") - } else { - if field, ok := scope.FieldByName(column); ok { - if field.Relationship == nil || len(field.Relationship.ForeignFieldNames) == 0 { - err = fmt.Errorf("invalid association %v for %v", column, scope.IndirectValue().Type()) - } else { - return &Association{scope: scope, column: column, field: field} - } - } else { - err = fmt.Errorf("%v doesn't have column %v", scope.IndirectValue().Type(), column) - } - } - - return &Association{Error: err} -} - -// Preload preload associations with given conditions -// db.Preload("Orders", "state NOT IN (?)", "cancelled").Find(&users) -func (s *DB) Preload(column string, conditions ...interface{}) *DB { - return s.clone().search.Preload(column, conditions...).db -} - -// Set set setting by name, which could be used in callbacks, will clone a new db, and update its setting -func (s *DB) Set(name string, value interface{}) *DB { - return s.clone().InstantSet(name, value) -} - -// InstantSet instant set setting, will affect current db -func (s *DB) InstantSet(name string, value interface{}) *DB { - s.values.Store(name, value) - return s -} - -// Get get setting by name -func (s *DB) Get(name string) (value interface{}, ok bool) { - value, ok = s.values.Load(name) - return -} - -// SetJoinTableHandler set a model's join table handler for a relation -func (s *DB) SetJoinTableHandler(source interface{}, column string, handler JoinTableHandlerInterface) { - scope := 
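// A minimal sketch (not from this file) of a migration sequence built from
// the schema helpers above; AutoMigrate only adds missing columns, so the
// index and foreign key are declared explicitly. Profile is hypothetical:
type Profile struct {
	ID     uint
	UserID uint
	Bio    string
}

db.AutoMigrate(&User{}, &Profile{})
db.Model(&User{}).AddUniqueIndex("idx_users_name", "name")
db.Model(&Profile{}).AddForeignKey("user_id", "users(id)", "RESTRICT", "RESTRICT")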
s.NewScope(source) - for _, field := range scope.GetModelStruct().StructFields { - if field.Name == column || field.DBName == column { - if many2many, _ := field.TagSettingsGet("MANY2MANY"); many2many != "" { - source := (&Scope{Value: source}).GetModelStruct().ModelType - destination := (&Scope{Value: reflect.New(field.Struct.Type).Interface()}).GetModelStruct().ModelType - handler.Setup(field.Relationship, many2many, source, destination) - field.Relationship.JoinTableHandler = handler - if table := handler.Table(s); scope.Dialect().HasTable(table) { - s.Table(table).AutoMigrate(handler) - } - } - } - } -} - -// AddError add error to the db -func (s *DB) AddError(err error) error { - if err != nil { - if err != ErrRecordNotFound { - if s.logMode == defaultLogMode { - go s.print("error", fileWithLineNum(), err) - } else { - s.log(err) - } - - errors := Errors(s.GetErrors()) - errors = errors.Add(err) - if len(errors) > 1 { - err = errors - } - } - - s.Error = err - } - return err -} - -// GetErrors get happened errors from the db -func (s *DB) GetErrors() []error { - if errs, ok := s.Error.(Errors); ok { - return errs - } else if s.Error != nil { - return []error{s.Error} - } - return []error{} -} - -//////////////////////////////////////////////////////////////////////////////// -// Private Methods For DB -//////////////////////////////////////////////////////////////////////////////// - -func (s *DB) clone() *DB { - db := &DB{ - db: s.db, - parent: s.parent, - logger: s.logger, - logMode: s.logMode, - Value: s.Value, - Error: s.Error, - blockGlobalUpdate: s.blockGlobalUpdate, - dialect: newDialect(s.dialect.GetName(), s.db), - nowFuncOverride: s.nowFuncOverride, - } - - s.values.Range(func(k, v interface{}) bool { - db.values.Store(k, v) - return true - }) - - if s.search == nil { - db.search = &search{limit: -1, offset: -1} - } else { - db.search = s.search.clone() - } - - db.search.db = db - return db -} - -func (s *DB) print(v ...interface{}) { - s.logger.Print(v...) -} - -func (s *DB) log(v ...interface{}) { - if s != nil && s.logMode == detailedLogMode { - s.print(append([]interface{}{"log", fileWithLineNum()}, v...)...) 
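// A minimal sketch (not from this file) of the error model implemented by
// AddError/GetErrors above: failures accumulate on the returned *DB rather
// than being returned directly, folding into an Errors slice when repeated:
var users []User
res := db.Where("age > ?", 18).Find(&users)
if res.Error != nil {
	for _, e := range res.GetErrors() {
		log.Println(e)
	}
}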
- } -} - -func (s *DB) slog(sql string, t time.Time, vars ...interface{}) { - if s.logMode == detailedLogMode { - s.print("sql", fileWithLineNum(), NowFunc().Sub(t), sql, vars, s.RowsAffected) - } -} diff --git a/vendor/github.com/jinzhu/gorm/model.go b/vendor/github.com/jinzhu/gorm/model.go deleted file mode 100644 index f37ff7e..0000000 --- a/vendor/github.com/jinzhu/gorm/model.go +++ /dev/null @@ -1,14 +0,0 @@ -package gorm - -import "time" - -// Model base model definition, including fields `ID`, `CreatedAt`, `UpdatedAt`, `DeletedAt`, which could be embedded in your models -// type User struct { -// gorm.Model -// } -type Model struct { - ID uint `gorm:"primary_key"` - CreatedAt time.Time - UpdatedAt time.Time - DeletedAt *time.Time `sql:"index"` -} diff --git a/vendor/github.com/jinzhu/gorm/model_struct.go b/vendor/github.com/jinzhu/gorm/model_struct.go deleted file mode 100644 index d9e2e90..0000000 --- a/vendor/github.com/jinzhu/gorm/model_struct.go +++ /dev/null @@ -1,671 +0,0 @@ -package gorm - -import ( - "database/sql" - "errors" - "go/ast" - "reflect" - "strings" - "sync" - "time" - - "github.com/jinzhu/inflection" -) - -// DefaultTableNameHandler default table name handler -var DefaultTableNameHandler = func(db *DB, defaultTableName string) string { - return defaultTableName -} - -// lock for mutating global cached model metadata -var structsLock sync.Mutex - -// global cache of model metadata -var modelStructsMap sync.Map - -// ModelStruct model definition -type ModelStruct struct { - PrimaryFields []*StructField - StructFields []*StructField - ModelType reflect.Type - - defaultTableName string - l sync.Mutex -} - -// TableName returns model's table name -func (s *ModelStruct) TableName(db *DB) string { - s.l.Lock() - defer s.l.Unlock() - - if s.defaultTableName == "" && db != nil && s.ModelType != nil { - // Set default table name - if tabler, ok := reflect.New(s.ModelType).Interface().(tabler); ok { - s.defaultTableName = tabler.TableName() - } else { - tableName := ToTableName(s.ModelType.Name()) - db.parent.RLock() - if db == nil || (db.parent != nil && !db.parent.singularTable) { - tableName = inflection.Plural(tableName) - } - db.parent.RUnlock() - s.defaultTableName = tableName - } - } - - return DefaultTableNameHandler(db, s.defaultTableName) -} - -// StructField model field's struct definition -type StructField struct { - DBName string - Name string - Names []string - IsPrimaryKey bool - IsNormal bool - IsIgnored bool - IsScanner bool - HasDefaultValue bool - Tag reflect.StructTag - TagSettings map[string]string - Struct reflect.StructField - IsForeignKey bool - Relationship *Relationship - - tagSettingsLock sync.RWMutex -} - -// TagSettingsSet Sets a tag in the tag settings map -func (sf *StructField) TagSettingsSet(key, val string) { - sf.tagSettingsLock.Lock() - defer sf.tagSettingsLock.Unlock() - sf.TagSettings[key] = val -} - -// TagSettingsGet returns a tag from the tag settings -func (sf *StructField) TagSettingsGet(key string) (string, bool) { - sf.tagSettingsLock.RLock() - defer sf.tagSettingsLock.RUnlock() - val, ok := sf.TagSettings[key] - return val, ok -} - -// TagSettingsDelete deletes a tag -func (sf *StructField) TagSettingsDelete(key string) { - sf.tagSettingsLock.Lock() - defer sf.tagSettingsLock.Unlock() - delete(sf.TagSettings, key) -} - -func (sf *StructField) clone() *StructField { - clone := &StructField{ - DBName: sf.DBName, - Name: sf.Name, - Names: sf.Names, - IsPrimaryKey: sf.IsPrimaryKey, - IsNormal: sf.IsNormal, - IsIgnored: sf.IsIgnored, 
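// A minimal sketch (not from this file) of embedding the Model base struct
// deleted above; the DeletedAt pointer is what drives soft deletes, and the
// default table name is pluralized ("articles") unless SingularTable(true)
// was called. Article is hypothetical:
type Article struct {
	gorm.Model // ID, CreatedAt, UpdatedAt, DeletedAt
	Title string
}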
- IsScanner: sf.IsScanner, - HasDefaultValue: sf.HasDefaultValue, - Tag: sf.Tag, - TagSettings: map[string]string{}, - Struct: sf.Struct, - IsForeignKey: sf.IsForeignKey, - } - - if sf.Relationship != nil { - relationship := *sf.Relationship - clone.Relationship = &relationship - } - - // copy the struct field tagSettings, they should be read-locked while they are copied - sf.tagSettingsLock.Lock() - defer sf.tagSettingsLock.Unlock() - for key, value := range sf.TagSettings { - clone.TagSettings[key] = value - } - - return clone -} - -// Relationship described the relationship between models -type Relationship struct { - Kind string - PolymorphicType string - PolymorphicDBName string - PolymorphicValue string - ForeignFieldNames []string - ForeignDBNames []string - AssociationForeignFieldNames []string - AssociationForeignDBNames []string - JoinTableHandler JoinTableHandlerInterface -} - -func getForeignField(column string, fields []*StructField) *StructField { - for _, field := range fields { - if field.Name == column || field.DBName == column || field.DBName == ToColumnName(column) { - return field - } - } - return nil -} - -// GetModelStruct get value's model struct, relationships based on struct and tag definition -func (scope *Scope) GetModelStruct() *ModelStruct { - var modelStruct ModelStruct - // Scope value can't be nil - if scope.Value == nil { - return &modelStruct - } - - reflectType := reflect.ValueOf(scope.Value).Type() - for reflectType.Kind() == reflect.Slice || reflectType.Kind() == reflect.Ptr { - reflectType = reflectType.Elem() - } - - // Scope value need to be a struct - if reflectType.Kind() != reflect.Struct { - return &modelStruct - } - - // Get Cached model struct - isSingularTable := false - if scope.db != nil && scope.db.parent != nil { - scope.db.parent.RLock() - isSingularTable = scope.db.parent.singularTable - scope.db.parent.RUnlock() - } - - hashKey := struct { - singularTable bool - reflectType reflect.Type - }{isSingularTable, reflectType} - if value, ok := modelStructsMap.Load(hashKey); ok && value != nil { - return value.(*ModelStruct) - } - - modelStruct.ModelType = reflectType - - // Get all fields - for i := 0; i < reflectType.NumField(); i++ { - if fieldStruct := reflectType.Field(i); ast.IsExported(fieldStruct.Name) { - field := &StructField{ - Struct: fieldStruct, - Name: fieldStruct.Name, - Names: []string{fieldStruct.Name}, - Tag: fieldStruct.Tag, - TagSettings: parseTagSetting(fieldStruct.Tag), - } - - // is ignored field - if _, ok := field.TagSettingsGet("-"); ok { - field.IsIgnored = true - } else { - if _, ok := field.TagSettingsGet("PRIMARY_KEY"); ok { - field.IsPrimaryKey = true - modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, field) - } - - if _, ok := field.TagSettingsGet("DEFAULT"); ok && !field.IsPrimaryKey { - field.HasDefaultValue = true - } - - if _, ok := field.TagSettingsGet("AUTO_INCREMENT"); ok && !field.IsPrimaryKey { - field.HasDefaultValue = true - } - - indirectType := fieldStruct.Type - for indirectType.Kind() == reflect.Ptr { - indirectType = indirectType.Elem() - } - - fieldValue := reflect.New(indirectType).Interface() - if _, isScanner := fieldValue.(sql.Scanner); isScanner { - // is scanner - field.IsScanner, field.IsNormal = true, true - if indirectType.Kind() == reflect.Struct { - for i := 0; i < indirectType.NumField(); i++ { - for key, value := range parseTagSetting(indirectType.Field(i).Tag) { - if _, ok := field.TagSettingsGet(key); !ok { - field.TagSettingsSet(key, value) - } - } - } - } - } else 
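// A minimal sketch (not from this file) of the struct tags whose parsing is
// shown above: parseTagSetting upper-cases the keys, so these tags populate
// IsPrimaryKey, HasDefaultValue and IsIgnored respectively. Hypothetical:
type Account struct {
	ID      uint   `gorm:"primary_key"`
	Balance int64  `gorm:"default:0"`
	Scratch string `gorm:"-"` // ignored by the ORM's SQL generation
}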
if _, isTime := fieldValue.(*time.Time); isTime { - // is time - field.IsNormal = true - } else if _, ok := field.TagSettingsGet("EMBEDDED"); ok || fieldStruct.Anonymous { - // is embedded struct - for _, subField := range scope.New(fieldValue).GetModelStruct().StructFields { - subField = subField.clone() - subField.Names = append([]string{fieldStruct.Name}, subField.Names...) - if prefix, ok := field.TagSettingsGet("EMBEDDED_PREFIX"); ok { - subField.DBName = prefix + subField.DBName - } - - if subField.IsPrimaryKey { - if _, ok := subField.TagSettingsGet("PRIMARY_KEY"); ok { - modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, subField) - } else { - subField.IsPrimaryKey = false - } - } - - if subField.Relationship != nil && subField.Relationship.JoinTableHandler != nil { - if joinTableHandler, ok := subField.Relationship.JoinTableHandler.(*JoinTableHandler); ok { - newJoinTableHandler := &JoinTableHandler{} - newJoinTableHandler.Setup(subField.Relationship, joinTableHandler.TableName, reflectType, joinTableHandler.Destination.ModelType) - subField.Relationship.JoinTableHandler = newJoinTableHandler - } - } - - modelStruct.StructFields = append(modelStruct.StructFields, subField) - } - continue - } else { - // build relationships - switch indirectType.Kind() { - case reflect.Slice: - defer func(field *StructField) { - var ( - relationship = &Relationship{} - toScope = scope.New(reflect.New(field.Struct.Type).Interface()) - foreignKeys []string - associationForeignKeys []string - elemType = field.Struct.Type - ) - - if foreignKey, _ := field.TagSettingsGet("FOREIGNKEY"); foreignKey != "" { - foreignKeys = strings.Split(foreignKey, ",") - } - - if foreignKey, _ := field.TagSettingsGet("ASSOCIATION_FOREIGNKEY"); foreignKey != "" { - associationForeignKeys = strings.Split(foreignKey, ",") - } else if foreignKey, _ := field.TagSettingsGet("ASSOCIATIONFOREIGNKEY"); foreignKey != "" { - associationForeignKeys = strings.Split(foreignKey, ",") - } - - for elemType.Kind() == reflect.Slice || elemType.Kind() == reflect.Ptr { - elemType = elemType.Elem() - } - - if elemType.Kind() == reflect.Struct { - if many2many, _ := field.TagSettingsGet("MANY2MANY"); many2many != "" { - relationship.Kind = "many_to_many" - - { // Foreign Keys for Source - joinTableDBNames := []string{} - - if foreignKey, _ := field.TagSettingsGet("JOINTABLE_FOREIGNKEY"); foreignKey != "" { - joinTableDBNames = strings.Split(foreignKey, ",") - } - - // if no foreign keys defined with tag - if len(foreignKeys) == 0 { - for _, field := range modelStruct.PrimaryFields { - foreignKeys = append(foreignKeys, field.DBName) - } - } - - for idx, foreignKey := range foreignKeys { - if foreignField := getForeignField(foreignKey, modelStruct.StructFields); foreignField != nil { - // source foreign keys (db names) - relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.DBName) - - // setup join table foreign keys for source - if len(joinTableDBNames) > idx { - // if defined join table's foreign key - relationship.ForeignDBNames = append(relationship.ForeignDBNames, joinTableDBNames[idx]) - } else { - defaultJointableForeignKey := ToColumnName(reflectType.Name()) + "_" + foreignField.DBName - relationship.ForeignDBNames = append(relationship.ForeignDBNames, defaultJointableForeignKey) - } - } - } - } - - { // Foreign Keys for Association (Destination) - associationJoinTableDBNames := []string{} - - if foreignKey, _ := field.TagSettingsGet("ASSOCIATION_JOINTABLE_FOREIGNKEY"); foreignKey != "" { - 
associationJoinTableDBNames = strings.Split(foreignKey, ",") - } - - // if no association foreign keys defined with tag - if len(associationForeignKeys) == 0 { - for _, field := range toScope.PrimaryFields() { - associationForeignKeys = append(associationForeignKeys, field.DBName) - } - } - - for idx, name := range associationForeignKeys { - if field, ok := toScope.FieldByName(name); ok { - // association foreign keys (db names) - relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, field.DBName) - - // setup join table foreign keys for association - if len(associationJoinTableDBNames) > idx { - relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, associationJoinTableDBNames[idx]) - } else { - // join table foreign keys for association - joinTableDBName := ToColumnName(elemType.Name()) + "_" + field.DBName - relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, joinTableDBName) - } - } - } - } - - joinTableHandler := JoinTableHandler{} - joinTableHandler.Setup(relationship, many2many, reflectType, elemType) - relationship.JoinTableHandler = &joinTableHandler - field.Relationship = relationship - } else { - // User has many comments, associationType is User, comment use UserID as foreign key - var associationType = reflectType.Name() - var toFields = toScope.GetStructFields() - relationship.Kind = "has_many" - - if polymorphic, _ := field.TagSettingsGet("POLYMORPHIC"); polymorphic != "" { - // Dog has many toys, tag polymorphic is Owner, then associationType is Owner - // Toy use OwnerID, OwnerType ('dogs') as foreign key - if polymorphicType := getForeignField(polymorphic+"Type", toFields); polymorphicType != nil { - associationType = polymorphic - relationship.PolymorphicType = polymorphicType.Name - relationship.PolymorphicDBName = polymorphicType.DBName - // if Dog has multiple set of toys set name of the set (instead of default 'dogs') - if value, ok := field.TagSettingsGet("POLYMORPHIC_VALUE"); ok { - relationship.PolymorphicValue = value - } else { - relationship.PolymorphicValue = scope.TableName() - } - polymorphicType.IsForeignKey = true - } - } - - // if no foreign keys defined with tag - if len(foreignKeys) == 0 { - // if no association foreign keys defined with tag - if len(associationForeignKeys) == 0 { - for _, field := range modelStruct.PrimaryFields { - foreignKeys = append(foreignKeys, associationType+field.Name) - associationForeignKeys = append(associationForeignKeys, field.Name) - } - } else { - // generate foreign keys from defined association foreign keys - for _, scopeFieldName := range associationForeignKeys { - if foreignField := getForeignField(scopeFieldName, modelStruct.StructFields); foreignField != nil { - foreignKeys = append(foreignKeys, associationType+foreignField.Name) - associationForeignKeys = append(associationForeignKeys, foreignField.Name) - } - } - } - } else { - // generate association foreign keys from foreign keys - if len(associationForeignKeys) == 0 { - for _, foreignKey := range foreignKeys { - if strings.HasPrefix(foreignKey, associationType) { - associationForeignKey := strings.TrimPrefix(foreignKey, associationType) - if foreignField := getForeignField(associationForeignKey, modelStruct.StructFields); foreignField != nil { - associationForeignKeys = append(associationForeignKeys, associationForeignKey) - } - } - } - if len(associationForeignKeys) == 0 && len(foreignKeys) == 1 { - associationForeignKeys = []string{scope.PrimaryKey()} 
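// A minimal sketch (not from this file) of the relationship tags the code
// above resolves; the polymorphic declaration gives Toy the OwnerID and
// OwnerType columns from the Dog-and-toys comment, and many2many routes
// through the named join table. All types are hypothetical:
type Toy struct {
	ID        uint
	OwnerID   uint
	OwnerType string
}

type Dog struct {
	ID   uint
	Toys []Toy `gorm:"polymorphic:Owner"`
}

type Language struct {
	ID   uint
	Name string
}

type Speaker struct {
	ID        uint
	Languages []Language `gorm:"many2many:speaker_languages"`
}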
- } - } else if len(foreignKeys) != len(associationForeignKeys) { - scope.Err(errors.New("invalid foreign keys, should have same length")) - return - } - } - - for idx, foreignKey := range foreignKeys { - if foreignField := getForeignField(foreignKey, toFields); foreignField != nil { - if associationField := getForeignField(associationForeignKeys[idx], modelStruct.StructFields); associationField != nil { - // mark field as foreignkey, use global lock to avoid race - structsLock.Lock() - foreignField.IsForeignKey = true - structsLock.Unlock() - - // association foreign keys - relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, associationField.Name) - relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, associationField.DBName) - - // association foreign keys - relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name) - relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName) - } - } - } - - if len(relationship.ForeignFieldNames) != 0 { - field.Relationship = relationship - } - } - } else { - field.IsNormal = true - } - }(field) - case reflect.Struct: - defer func(field *StructField) { - var ( - // user has one profile, associationType is User, profile use UserID as foreign key - // user belongs to profile, associationType is Profile, user use ProfileID as foreign key - associationType = reflectType.Name() - relationship = &Relationship{} - toScope = scope.New(reflect.New(field.Struct.Type).Interface()) - toFields = toScope.GetStructFields() - tagForeignKeys []string - tagAssociationForeignKeys []string - ) - - if foreignKey, _ := field.TagSettingsGet("FOREIGNKEY"); foreignKey != "" { - tagForeignKeys = strings.Split(foreignKey, ",") - } - - if foreignKey, _ := field.TagSettingsGet("ASSOCIATION_FOREIGNKEY"); foreignKey != "" { - tagAssociationForeignKeys = strings.Split(foreignKey, ",") - } else if foreignKey, _ := field.TagSettingsGet("ASSOCIATIONFOREIGNKEY"); foreignKey != "" { - tagAssociationForeignKeys = strings.Split(foreignKey, ",") - } - - if polymorphic, _ := field.TagSettingsGet("POLYMORPHIC"); polymorphic != "" { - // Cat has one toy, tag polymorphic is Owner, then associationType is Owner - // Toy use OwnerID, OwnerType ('cats') as foreign key - if polymorphicType := getForeignField(polymorphic+"Type", toFields); polymorphicType != nil { - associationType = polymorphic - relationship.PolymorphicType = polymorphicType.Name - relationship.PolymorphicDBName = polymorphicType.DBName - // if Cat has several different types of toys set name for each (instead of default 'cats') - if value, ok := field.TagSettingsGet("POLYMORPHIC_VALUE"); ok { - relationship.PolymorphicValue = value - } else { - relationship.PolymorphicValue = scope.TableName() - } - polymorphicType.IsForeignKey = true - } - } - - // Has One - { - var foreignKeys = tagForeignKeys - var associationForeignKeys = tagAssociationForeignKeys - // if no foreign keys defined with tag - if len(foreignKeys) == 0 { - // if no association foreign keys defined with tag - if len(associationForeignKeys) == 0 { - for _, primaryField := range modelStruct.PrimaryFields { - foreignKeys = append(foreignKeys, associationType+primaryField.Name) - associationForeignKeys = append(associationForeignKeys, primaryField.Name) - } - } else { - // generate foreign keys form association foreign keys - for _, associationForeignKey := range tagAssociationForeignKeys { - if foreignField := 
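// A minimal sketch (not from this file) of the has-one derivation above:
// with no FOREIGNKEY tag, the key is the association type name plus the
// primary field name, so Owner and OwnerProfile match through OwnerID:
type OwnerProfile struct {
	ID      uint
	OwnerID uint // derived: associationType ("Owner") + primary field ("ID")
}

type Owner struct {
	ID      uint
	Profile OwnerProfile // has_one
}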
getForeignField(associationForeignKey, modelStruct.StructFields); foreignField != nil { - foreignKeys = append(foreignKeys, associationType+foreignField.Name) - associationForeignKeys = append(associationForeignKeys, foreignField.Name) - } - } - } - } else { - // generate association foreign keys from foreign keys - if len(associationForeignKeys) == 0 { - for _, foreignKey := range foreignKeys { - if strings.HasPrefix(foreignKey, associationType) { - associationForeignKey := strings.TrimPrefix(foreignKey, associationType) - if foreignField := getForeignField(associationForeignKey, modelStruct.StructFields); foreignField != nil { - associationForeignKeys = append(associationForeignKeys, associationForeignKey) - } - } - } - if len(associationForeignKeys) == 0 && len(foreignKeys) == 1 { - associationForeignKeys = []string{scope.PrimaryKey()} - } - } else if len(foreignKeys) != len(associationForeignKeys) { - scope.Err(errors.New("invalid foreign keys, should have same length")) - return - } - } - - for idx, foreignKey := range foreignKeys { - if foreignField := getForeignField(foreignKey, toFields); foreignField != nil { - if scopeField := getForeignField(associationForeignKeys[idx], modelStruct.StructFields); scopeField != nil { - // mark field as foreignkey, use global lock to avoid race - structsLock.Lock() - foreignField.IsForeignKey = true - structsLock.Unlock() - - // association foreign keys - relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, scopeField.Name) - relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, scopeField.DBName) - - // association foreign keys - relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name) - relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName) - } - } - } - } - - if len(relationship.ForeignFieldNames) != 0 { - relationship.Kind = "has_one" - field.Relationship = relationship - } else { - var foreignKeys = tagForeignKeys - var associationForeignKeys = tagAssociationForeignKeys - - if len(foreignKeys) == 0 { - // generate foreign keys & association foreign keys - if len(associationForeignKeys) == 0 { - for _, primaryField := range toScope.PrimaryFields() { - foreignKeys = append(foreignKeys, field.Name+primaryField.Name) - associationForeignKeys = append(associationForeignKeys, primaryField.Name) - } - } else { - // generate foreign keys with association foreign keys - for _, associationForeignKey := range associationForeignKeys { - if foreignField := getForeignField(associationForeignKey, toFields); foreignField != nil { - foreignKeys = append(foreignKeys, field.Name+foreignField.Name) - associationForeignKeys = append(associationForeignKeys, foreignField.Name) - } - } - } - } else { - // generate foreign keys & association foreign keys - if len(associationForeignKeys) == 0 { - for _, foreignKey := range foreignKeys { - if strings.HasPrefix(foreignKey, field.Name) { - associationForeignKey := strings.TrimPrefix(foreignKey, field.Name) - if foreignField := getForeignField(associationForeignKey, toFields); foreignField != nil { - associationForeignKeys = append(associationForeignKeys, associationForeignKey) - } - } - } - if len(associationForeignKeys) == 0 && len(foreignKeys) == 1 { - associationForeignKeys = []string{toScope.PrimaryKey()} - } - } else if len(foreignKeys) != len(associationForeignKeys) { - scope.Err(errors.New("invalid foreign keys, should have same length")) - return - } - } - - for idx, 
foreignKey := range foreignKeys { - if foreignField := getForeignField(foreignKey, modelStruct.StructFields); foreignField != nil { - if associationField := getForeignField(associationForeignKeys[idx], toFields); associationField != nil { - // mark field as foreignkey, use global lock to avoid race - structsLock.Lock() - foreignField.IsForeignKey = true - structsLock.Unlock() - - // association foreign keys - relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, associationField.Name) - relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, associationField.DBName) - - // source foreign keys - relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name) - relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName) - } - } - } - - if len(relationship.ForeignFieldNames) != 0 { - relationship.Kind = "belongs_to" - field.Relationship = relationship - } - } - }(field) - default: - field.IsNormal = true - } - } - } - - // Even it is ignored, also possible to decode db value into the field - if value, ok := field.TagSettingsGet("COLUMN"); ok { - field.DBName = value - } else { - field.DBName = ToColumnName(fieldStruct.Name) - } - - modelStruct.StructFields = append(modelStruct.StructFields, field) - } - } - - if len(modelStruct.PrimaryFields) == 0 { - if field := getForeignField("id", modelStruct.StructFields); field != nil { - field.IsPrimaryKey = true - modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, field) - } - } - - modelStructsMap.Store(hashKey, &modelStruct) - - return &modelStruct -} - -// GetStructFields get model's field structs -func (scope *Scope) GetStructFields() (fields []*StructField) { - return scope.GetModelStruct().StructFields -} - -func parseTagSetting(tags reflect.StructTag) map[string]string { - setting := map[string]string{} - for _, str := range []string{tags.Get("sql"), tags.Get("gorm")} { - if str == "" { - continue - } - tags := strings.Split(str, ";") - for _, value := range tags { - v := strings.Split(value, ":") - k := strings.TrimSpace(strings.ToUpper(v[0])) - if len(v) >= 2 { - setting[k] = strings.Join(v[1:], ":") - } else { - setting[k] = k - } - } - } - return setting -} diff --git a/vendor/github.com/jinzhu/gorm/naming.go b/vendor/github.com/jinzhu/gorm/naming.go deleted file mode 100644 index 6b0a4fd..0000000 --- a/vendor/github.com/jinzhu/gorm/naming.go +++ /dev/null @@ -1,124 +0,0 @@ -package gorm - -import ( - "bytes" - "strings" -) - -// Namer is a function type which is given a string and return a string -type Namer func(string) string - -// NamingStrategy represents naming strategies -type NamingStrategy struct { - DB Namer - Table Namer - Column Namer -} - -// TheNamingStrategy is being initialized with defaultNamingStrategy -var TheNamingStrategy = &NamingStrategy{ - DB: defaultNamer, - Table: defaultNamer, - Column: defaultNamer, -} - -// AddNamingStrategy sets the naming strategy -func AddNamingStrategy(ns *NamingStrategy) { - if ns.DB == nil { - ns.DB = defaultNamer - } - if ns.Table == nil { - ns.Table = defaultNamer - } - if ns.Column == nil { - ns.Column = defaultNamer - } - TheNamingStrategy = ns -} - -// DBName alters the given name by DB -func (ns *NamingStrategy) DBName(name string) string { - return ns.DB(name) -} - -// TableName alters the given name by Table -func (ns *NamingStrategy) TableName(name string) string { - return ns.Table(name) -} - -// ColumnName alters the given name by Column -func 
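// A minimal sketch (not from this file) of AddNamingStrategy above; members
// left nil fall back to defaultNamer, so only table naming changes here.
// The prefix is hypothetical:
gorm.AddNamingStrategy(&gorm.NamingStrategy{
	Table: func(name string) string { return "app_" + name },
})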
(ns *NamingStrategy) ColumnName(name string) string { - return ns.Column(name) -} - -// ToDBName convert string to db name -func ToDBName(name string) string { - return TheNamingStrategy.DBName(name) -} - -// ToTableName convert string to table name -func ToTableName(name string) string { - return TheNamingStrategy.TableName(name) -} - -// ToColumnName convert string to db name -func ToColumnName(name string) string { - return TheNamingStrategy.ColumnName(name) -} - -var smap = newSafeMap() - -func defaultNamer(name string) string { - const ( - lower = false - upper = true - ) - - if v := smap.Get(name); v != "" { - return v - } - - if name == "" { - return "" - } - - var ( - value = commonInitialismsReplacer.Replace(name) - buf = bytes.NewBufferString("") - lastCase, currCase, nextCase, nextNumber bool - ) - - for i, v := range value[:len(value)-1] { - nextCase = bool(value[i+1] >= 'A' && value[i+1] <= 'Z') - nextNumber = bool(value[i+1] >= '0' && value[i+1] <= '9') - - if i > 0 { - if currCase == upper { - if lastCase == upper && (nextCase == upper || nextNumber == upper) { - buf.WriteRune(v) - } else { - if value[i-1] != '_' && value[i+1] != '_' { - buf.WriteRune('_') - } - buf.WriteRune(v) - } - } else { - buf.WriteRune(v) - if i == len(value)-2 && (nextCase == upper && nextNumber == lower) { - buf.WriteRune('_') - } - } - } else { - currCase = upper - buf.WriteRune(v) - } - lastCase = currCase - currCase = nextCase - } - - buf.WriteByte(value[len(value)-1]) - - s := strings.ToLower(buf.String()) - smap.Set(name, s) - return s -} diff --git a/vendor/github.com/jinzhu/gorm/scope.go b/vendor/github.com/jinzhu/gorm/scope.go deleted file mode 100644 index d82cadb..0000000 --- a/vendor/github.com/jinzhu/gorm/scope.go +++ /dev/null @@ -1,1421 +0,0 @@ -package gorm - -import ( - "bytes" - "database/sql" - "database/sql/driver" - "errors" - "fmt" - "reflect" - "regexp" - "strings" - "time" -) - -// Scope contain current operation's information when you perform any operation on the database -type Scope struct { - Search *search - Value interface{} - SQL string - SQLVars []interface{} - db *DB - instanceID string - primaryKeyField *Field - skipLeft bool - fields *[]*Field - selectAttrs *[]string -} - -// IndirectValue return scope's reflect value's indirect value -func (scope *Scope) IndirectValue() reflect.Value { - return indirect(reflect.ValueOf(scope.Value)) -} - -// New create a new Scope without search information -func (scope *Scope) New(value interface{}) *Scope { - return &Scope{db: scope.NewDB(), Search: &search{}, Value: value} -} - -//////////////////////////////////////////////////////////////////////////////// -// Scope DB -//////////////////////////////////////////////////////////////////////////////// - -// DB return scope's DB connection -func (scope *Scope) DB() *DB { - return scope.db -} - -// NewDB create a new DB without search information -func (scope *Scope) NewDB() *DB { - if scope.db != nil { - db := scope.db.clone() - db.search = nil - db.Value = nil - return db - } - return nil -} - -// SQLDB return *sql.DB -func (scope *Scope) SQLDB() SQLCommon { - return scope.db.db -} - -// Dialect get dialect -func (scope *Scope) Dialect() Dialect { - return scope.db.dialect -} - -// Quote used to quote string to escape them for database -func (scope *Scope) Quote(str string) string { - if strings.Contains(str, ".") { - newStrs := []string{} - for _, str := range strings.Split(str, ".") { - newStrs = append(newStrs, scope.Dialect().Quote(str)) - } - return strings.Join(newStrs, 
".") - } - - return scope.Dialect().Quote(str) -} - -// Err add error to Scope -func (scope *Scope) Err(err error) error { - if err != nil { - scope.db.AddError(err) - } - return err -} - -// HasError check if there are any error -func (scope *Scope) HasError() bool { - return scope.db.Error != nil -} - -// Log print log message -func (scope *Scope) Log(v ...interface{}) { - scope.db.log(v...) -} - -// SkipLeft skip remaining callbacks -func (scope *Scope) SkipLeft() { - scope.skipLeft = true -} - -// Fields get value's fields -func (scope *Scope) Fields() []*Field { - if scope.fields == nil { - var ( - fields []*Field - indirectScopeValue = scope.IndirectValue() - isStruct = indirectScopeValue.Kind() == reflect.Struct - ) - - for _, structField := range scope.GetModelStruct().StructFields { - if isStruct { - fieldValue := indirectScopeValue - for _, name := range structField.Names { - if fieldValue.Kind() == reflect.Ptr && fieldValue.IsNil() { - fieldValue.Set(reflect.New(fieldValue.Type().Elem())) - } - fieldValue = reflect.Indirect(fieldValue).FieldByName(name) - } - fields = append(fields, &Field{StructField: structField, Field: fieldValue, IsBlank: isBlank(fieldValue)}) - } else { - fields = append(fields, &Field{StructField: structField, IsBlank: true}) - } - } - scope.fields = &fields - } - - return *scope.fields -} - -// FieldByName find `gorm.Field` with field name or db name -func (scope *Scope) FieldByName(name string) (field *Field, ok bool) { - var ( - dbName = ToColumnName(name) - mostMatchedField *Field - ) - - for _, field := range scope.Fields() { - if field.Name == name || field.DBName == name { - return field, true - } - if field.DBName == dbName { - mostMatchedField = field - } - } - return mostMatchedField, mostMatchedField != nil -} - -// PrimaryFields return scope's primary fields -func (scope *Scope) PrimaryFields() (fields []*Field) { - for _, field := range scope.Fields() { - if field.IsPrimaryKey { - fields = append(fields, field) - } - } - return fields -} - -// PrimaryField return scope's main primary field, if defined more that one primary fields, will return the one having column name `id` or the first one -func (scope *Scope) PrimaryField() *Field { - if primaryFields := scope.GetModelStruct().PrimaryFields; len(primaryFields) > 0 { - if len(primaryFields) > 1 { - if field, ok := scope.FieldByName("id"); ok { - return field - } - } - return scope.PrimaryFields()[0] - } - return nil -} - -// PrimaryKey get main primary field's db name -func (scope *Scope) PrimaryKey() string { - if field := scope.PrimaryField(); field != nil { - return field.DBName - } - return "" -} - -// PrimaryKeyZero check main primary field's value is blank or not -func (scope *Scope) PrimaryKeyZero() bool { - field := scope.PrimaryField() - return field == nil || field.IsBlank -} - -// PrimaryKeyValue get the primary key's value -func (scope *Scope) PrimaryKeyValue() interface{} { - if field := scope.PrimaryField(); field != nil && field.Field.IsValid() { - return field.Field.Interface() - } - return 0 -} - -// HasColumn to check if has column -func (scope *Scope) HasColumn(column string) bool { - for _, field := range scope.GetStructFields() { - if field.IsNormal && (field.Name == column || field.DBName == column) { - return true - } - } - return false -} - -// SetColumn to set the column's value, column could be field or field's name/dbname -func (scope *Scope) SetColumn(column interface{}, value interface{}) error { - var updateAttrs = map[string]interface{}{} - if attrs, ok := 
scope.InstanceGet("gorm:update_attrs"); ok { - updateAttrs = attrs.(map[string]interface{}) - defer scope.InstanceSet("gorm:update_attrs", updateAttrs) - } - - if field, ok := column.(*Field); ok { - updateAttrs[field.DBName] = value - return field.Set(value) - } else if name, ok := column.(string); ok { - var ( - dbName = ToDBName(name) - mostMatchedField *Field - ) - for _, field := range scope.Fields() { - if field.DBName == value { - updateAttrs[field.DBName] = value - return field.Set(value) - } - if !field.IsIgnored && ((field.DBName == dbName) || (field.Name == name && mostMatchedField == nil)) { - mostMatchedField = field - } - } - - if mostMatchedField != nil { - updateAttrs[mostMatchedField.DBName] = value - return mostMatchedField.Set(value) - } - } - return errors.New("could not convert column to field") -} - -// CallMethod call scope value's method, if it is a slice, will call its element's method one by one -func (scope *Scope) CallMethod(methodName string) { - if scope.Value == nil { - return - } - - if indirectScopeValue := scope.IndirectValue(); indirectScopeValue.Kind() == reflect.Slice { - for i := 0; i < indirectScopeValue.Len(); i++ { - scope.callMethod(methodName, indirectScopeValue.Index(i)) - } - } else { - scope.callMethod(methodName, indirectScopeValue) - } -} - -// AddToVars add value as sql's vars, used to prevent SQL injection -func (scope *Scope) AddToVars(value interface{}) string { - _, skipBindVar := scope.InstanceGet("skip_bindvar") - - if expr, ok := value.(*SqlExpr); ok { - exp := expr.expr - for _, arg := range expr.args { - if skipBindVar { - scope.AddToVars(arg) - } else { - exp = strings.Replace(exp, "?", scope.AddToVars(arg), 1) - } - } - return exp - } - - scope.SQLVars = append(scope.SQLVars, value) - - if skipBindVar { - return "?" - } - return scope.Dialect().BindVar(len(scope.SQLVars)) -} - -// SelectAttrs return selected attributes -func (scope *Scope) SelectAttrs() []string { - if scope.selectAttrs == nil { - attrs := []string{} - for _, value := range scope.Search.selects { - if str, ok := value.(string); ok { - attrs = append(attrs, str) - } else if strs, ok := value.([]string); ok { - attrs = append(attrs, strs...) 
- } else if strs, ok := value.([]interface{}); ok { - for _, str := range strs { - attrs = append(attrs, fmt.Sprintf("%v", str)) - } - } - } - scope.selectAttrs = &attrs - } - return *scope.selectAttrs -} - -// OmitAttrs return omitted attributes -func (scope *Scope) OmitAttrs() []string { - return scope.Search.omits -} - -type tabler interface { - TableName() string -} - -type dbTabler interface { - TableName(*DB) string -} - -// TableName return table name -func (scope *Scope) TableName() string { - if scope.Search != nil && len(scope.Search.tableName) > 0 { - return scope.Search.tableName - } - - if tabler, ok := scope.Value.(tabler); ok { - return tabler.TableName() - } - - if tabler, ok := scope.Value.(dbTabler); ok { - return tabler.TableName(scope.db) - } - - return scope.GetModelStruct().TableName(scope.db.Model(scope.Value)) -} - -// QuotedTableName return quoted table name -func (scope *Scope) QuotedTableName() (name string) { - if scope.Search != nil && len(scope.Search.tableName) > 0 { - if strings.Contains(scope.Search.tableName, " ") { - return scope.Search.tableName - } - return scope.Quote(scope.Search.tableName) - } - - return scope.Quote(scope.TableName()) -} - -// CombinedConditionSql return combined condition sql -func (scope *Scope) CombinedConditionSql() string { - joinSQL := scope.joinsSQL() - whereSQL := scope.whereSQL() - if scope.Search.raw { - whereSQL = strings.TrimSuffix(strings.TrimPrefix(whereSQL, "WHERE ("), ")") - } - return joinSQL + whereSQL + scope.groupSQL() + - scope.havingSQL() + scope.orderSQL() + scope.limitAndOffsetSQL() -} - -// Raw set raw sql -func (scope *Scope) Raw(sql string) *Scope { - scope.SQL = strings.Replace(sql, "$$$", "?", -1) - return scope -} - -// Exec perform generated SQL -func (scope *Scope) Exec() *Scope { - defer scope.trace(NowFunc()) - - if !scope.HasError() { - if result, err := scope.SQLDB().Exec(scope.SQL, scope.SQLVars...); scope.Err(err) == nil { - if count, err := result.RowsAffected(); scope.Err(err) == nil { - scope.db.RowsAffected = count - } - } - } - return scope -} - -// Set set value by name -func (scope *Scope) Set(name string, value interface{}) *Scope { - scope.db.InstantSet(name, value) - return scope -} - -// Get get setting by name -func (scope *Scope) Get(name string) (interface{}, bool) { - return scope.db.Get(name) -} - -// InstanceID get InstanceID for scope -func (scope *Scope) InstanceID() string { - if scope.instanceID == "" { - scope.instanceID = fmt.Sprintf("%v%v", &scope, &scope.db) - } - return scope.instanceID -} - -// InstanceSet set instance setting for current operation, but not for operations in callbacks, like saving associations callback -func (scope *Scope) InstanceSet(name string, value interface{}) *Scope { - return scope.Set(name+scope.InstanceID(), value) -} - -// InstanceGet get instance setting from current operation -func (scope *Scope) InstanceGet(name string) (interface{}, bool) { - return scope.Get(name + scope.InstanceID()) -} - -// Begin start a transaction -func (scope *Scope) Begin() *Scope { - if db, ok := scope.SQLDB().(sqlDb); ok { - if tx, err := db.Begin(); scope.Err(err) == nil { - scope.db.db = interface{}(tx).(SQLCommon) - scope.InstanceSet("gorm:started_transaction", true) - } - } - return scope -} - -// CommitOrRollback commit current transaction if no error happened, otherwise will rollback it -func (scope *Scope) CommitOrRollback() *Scope { - if _, ok := scope.InstanceGet("gorm:started_transaction"); ok { - if db, ok := scope.db.db.(sqlTx); ok { - if 
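// A minimal sketch (not from this file) of overriding the derived table
// name via the tabler interface above (dbTabler is the variant that also
// receives the connection). The legacy name is hypothetical:
type LegacyUser struct {
	ID uint
}

func (LegacyUser) TableName() string {
	return "tbl_users_v1"
}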
scope.HasError() { - db.Rollback() - } else { - scope.Err(db.Commit()) - } - scope.db.db = scope.db.parent.db - } - } - return scope -} - -//////////////////////////////////////////////////////////////////////////////// -// Private Methods For *gorm.Scope -//////////////////////////////////////////////////////////////////////////////// - -func (scope *Scope) callMethod(methodName string, reflectValue reflect.Value) { - // Only get address from non-pointer - if reflectValue.CanAddr() && reflectValue.Kind() != reflect.Ptr { - reflectValue = reflectValue.Addr() - } - - if methodValue := reflectValue.MethodByName(methodName); methodValue.IsValid() { - switch method := methodValue.Interface().(type) { - case func(): - method() - case func(*Scope): - method(scope) - case func(*DB): - newDB := scope.NewDB() - method(newDB) - scope.Err(newDB.Error) - case func() error: - scope.Err(method()) - case func(*Scope) error: - scope.Err(method(scope)) - case func(*DB) error: - newDB := scope.NewDB() - scope.Err(method(newDB)) - scope.Err(newDB.Error) - default: - scope.Err(fmt.Errorf("unsupported function %v", methodName)) - } - } -} - -var ( - columnRegexp = regexp.MustCompile("^[a-zA-Z\\d]+(\\.[a-zA-Z\\d]+)*$") // only match string like `name`, `users.name` - isNumberRegexp = regexp.MustCompile("^\\s*\\d+\\s*$") // match if string is number - comparisonRegexp = regexp.MustCompile("(?i) (=|<>|(>|<)(=?)|LIKE|IS|IN) ") - countingQueryRegexp = regexp.MustCompile("(?i)^count(.+)$") -) - -func (scope *Scope) quoteIfPossible(str string) string { - if columnRegexp.MatchString(str) { - return scope.Quote(str) - } - return str -} - -func (scope *Scope) scan(rows *sql.Rows, columns []string, fields []*Field) { - var ( - ignored interface{} - values = make([]interface{}, len(columns)) - selectFields []*Field - selectedColumnsMap = map[string]int{} - resetFields = map[int]*Field{} - ) - - for index, column := range columns { - values[index] = &ignored - - selectFields = fields - offset := 0 - if idx, ok := selectedColumnsMap[column]; ok { - offset = idx + 1 - selectFields = selectFields[offset:] - } - - for fieldIndex, field := range selectFields { - if field.DBName == column { - if field.Field.Kind() == reflect.Ptr { - values[index] = field.Field.Addr().Interface() - } else { - reflectValue := reflect.New(reflect.PtrTo(field.Struct.Type)) - reflectValue.Elem().Set(field.Field.Addr()) - values[index] = reflectValue.Interface() - resetFields[index] = field - } - - selectedColumnsMap[column] = offset + fieldIndex - - if field.IsNormal { - break - } - } - } - } - - scope.Err(rows.Scan(values...)) - - for index, field := range resetFields { - if v := reflect.ValueOf(values[index]).Elem().Elem(); v.IsValid() { - field.Field.Set(v) - } - } -} - -func (scope *Scope) primaryCondition(value interface{}) string { - return fmt.Sprintf("(%v.%v = %v)", scope.QuotedTableName(), scope.Quote(scope.PrimaryKey()), value) -} - -func (scope *Scope) buildCondition(clause map[string]interface{}, include bool) (str string) { - var ( - quotedTableName = scope.QuotedTableName() - quotedPrimaryKey = scope.Quote(scope.PrimaryKey()) - equalSQL = "=" - inSQL = "IN" - ) - - // If building not conditions - if !include { - equalSQL = "<>" - inSQL = "NOT IN" - } - - switch value := clause["query"].(type) { - case sql.NullInt64: - return fmt.Sprintf("(%v.%v %s %v)", quotedTableName, quotedPrimaryKey, equalSQL, value.Int64) - case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: - return fmt.Sprintf("(%v.%v %s %v)", 
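// A minimal sketch (not from this file) of a lifecycle hook reached through
// callMethod above, using its func(*Scope) error form; a non-nil return is
// recorded via scope.Err, which later callbacks check. The hook name
// BeforeSave is conventional gorm usage, assumed here rather than shown in
// this hunk; assumes "errors" imported:
func (u *User) BeforeSave(scope *gorm.Scope) error {
	if u.Age < 0 {
		return errors.New("age must be non-negative")
	}
	return nil
}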
quotedTableName, quotedPrimaryKey, equalSQL, value) - case []int, []int8, []int16, []int32, []int64, []uint, []uint8, []uint16, []uint32, []uint64, []string, []interface{}: - if !include && reflect.ValueOf(value).Len() == 0 { - return - } - str = fmt.Sprintf("(%v.%v %s (?))", quotedTableName, quotedPrimaryKey, inSQL) - clause["args"] = []interface{}{value} - case string: - if isNumberRegexp.MatchString(value) { - return fmt.Sprintf("(%v.%v %s %v)", quotedTableName, quotedPrimaryKey, equalSQL, scope.AddToVars(value)) - } - - if value != "" { - if !include { - if comparisonRegexp.MatchString(value) { - str = fmt.Sprintf("NOT (%v)", value) - } else { - str = fmt.Sprintf("(%v.%v NOT IN (?))", quotedTableName, scope.Quote(value)) - } - } else { - str = fmt.Sprintf("(%v)", value) - } - } - case map[string]interface{}: - var sqls []string - for key, value := range value { - if value != nil { - sqls = append(sqls, fmt.Sprintf("(%v.%v %s %v)", quotedTableName, scope.Quote(key), equalSQL, scope.AddToVars(value))) - } else { - if !include { - sqls = append(sqls, fmt.Sprintf("(%v.%v IS NOT NULL)", quotedTableName, scope.Quote(key))) - } else { - sqls = append(sqls, fmt.Sprintf("(%v.%v IS NULL)", quotedTableName, scope.Quote(key))) - } - } - } - return strings.Join(sqls, " AND ") - case interface{}: - var sqls []string - newScope := scope.New(value) - - if len(newScope.Fields()) == 0 { - scope.Err(fmt.Errorf("invalid query condition: %v", value)) - return - } - scopeQuotedTableName := newScope.QuotedTableName() - for _, field := range newScope.Fields() { - if !field.IsIgnored && !field.IsBlank { - sqls = append(sqls, fmt.Sprintf("(%v.%v %s %v)", scopeQuotedTableName, scope.Quote(field.DBName), equalSQL, scope.AddToVars(field.Field.Interface()))) - } - } - return strings.Join(sqls, " AND ") - default: - scope.Err(fmt.Errorf("invalid query condition: %v", value)) - return - } - - replacements := []string{} - args := clause["args"].([]interface{}) - for _, arg := range args { - var err error - switch reflect.ValueOf(arg).Kind() { - case reflect.Slice: // For where("id in (?)", []int64{1,2}) - if scanner, ok := interface{}(arg).(driver.Valuer); ok { - arg, err = scanner.Value() - replacements = append(replacements, scope.AddToVars(arg)) - } else if b, ok := arg.([]byte); ok { - replacements = append(replacements, scope.AddToVars(b)) - } else if as, ok := arg.([][]interface{}); ok { - var tempMarks []string - for _, a := range as { - var arrayMarks []string - for _, v := range a { - arrayMarks = append(arrayMarks, scope.AddToVars(v)) - } - - if len(arrayMarks) > 0 { - tempMarks = append(tempMarks, fmt.Sprintf("(%v)", strings.Join(arrayMarks, ","))) - } - } - - if len(tempMarks) > 0 { - replacements = append(replacements, strings.Join(tempMarks, ",")) - } - } else if values := reflect.ValueOf(arg); values.Len() > 0 { - var tempMarks []string - for i := 0; i < values.Len(); i++ { - tempMarks = append(tempMarks, scope.AddToVars(values.Index(i).Interface())) - } - replacements = append(replacements, strings.Join(tempMarks, ",")) - } else { - replacements = append(replacements, scope.AddToVars(Expr("NULL"))) - } - default: - if valuer, ok := interface{}(arg).(driver.Valuer); ok { - arg, err = valuer.Value() - } - - replacements = append(replacements, scope.AddToVars(arg)) - } - - if err != nil { - scope.Err(err) - } - } - - buff := bytes.NewBuffer([]byte{}) - i := 0 - for _, s := range str { - if s == '?' 
&& len(replacements) > i { - buff.WriteString(replacements[i]) - i++ - } else { - buff.WriteRune(s) - } - } - - str = buff.String() - - return -} - -func (scope *Scope) buildSelectQuery(clause map[string]interface{}) (str string) { - switch value := clause["query"].(type) { - case string: - str = value - case []string: - str = strings.Join(value, ", ") - } - - args := clause["args"].([]interface{}) - replacements := []string{} - for _, arg := range args { - switch reflect.ValueOf(arg).Kind() { - case reflect.Slice: - values := reflect.ValueOf(arg) - var tempMarks []string - for i := 0; i < values.Len(); i++ { - tempMarks = append(tempMarks, scope.AddToVars(values.Index(i).Interface())) - } - replacements = append(replacements, strings.Join(tempMarks, ",")) - default: - if valuer, ok := interface{}(arg).(driver.Valuer); ok { - arg, _ = valuer.Value() - } - replacements = append(replacements, scope.AddToVars(arg)) - } - } - - buff := bytes.NewBuffer([]byte{}) - i := 0 - for pos, char := range str { - if str[pos] == '?' { - buff.WriteString(replacements[i]) - i++ - } else { - buff.WriteRune(char) - } - } - - str = buff.String() - - return -} - -func (scope *Scope) whereSQL() (sql string) { - var ( - quotedTableName = scope.QuotedTableName() - deletedAtField, hasDeletedAtField = scope.FieldByName("DeletedAt") - primaryConditions, andConditions, orConditions []string - ) - - if !scope.Search.Unscoped && hasDeletedAtField { - sql := fmt.Sprintf("%v.%v IS NULL", quotedTableName, scope.Quote(deletedAtField.DBName)) - primaryConditions = append(primaryConditions, sql) - } - - if !scope.PrimaryKeyZero() { - for _, field := range scope.PrimaryFields() { - sql := fmt.Sprintf("%v.%v = %v", quotedTableName, scope.Quote(field.DBName), scope.AddToVars(field.Field.Interface())) - primaryConditions = append(primaryConditions, sql) - } - } - - for _, clause := range scope.Search.whereConditions { - if sql := scope.buildCondition(clause, true); sql != "" { - andConditions = append(andConditions, sql) - } - } - - for _, clause := range scope.Search.orConditions { - if sql := scope.buildCondition(clause, true); sql != "" { - orConditions = append(orConditions, sql) - } - } - - for _, clause := range scope.Search.notConditions { - if sql := scope.buildCondition(clause, false); sql != "" { - andConditions = append(andConditions, sql) - } - } - - orSQL := strings.Join(orConditions, " OR ") - combinedSQL := strings.Join(andConditions, " AND ") - if len(combinedSQL) > 0 { - if len(orSQL) > 0 { - combinedSQL = combinedSQL + " OR " + orSQL - } - } else { - combinedSQL = orSQL - } - - if len(primaryConditions) > 0 { - sql = "WHERE " + strings.Join(primaryConditions, " AND ") - if len(combinedSQL) > 0 { - sql = sql + " AND (" + combinedSQL + ")" - } - } else if len(combinedSQL) > 0 { - sql = "WHERE " + combinedSQL - } - return -} - -func (scope *Scope) selectSQL() string { - if len(scope.Search.selects) == 0 { - if len(scope.Search.joinConditions) > 0 { - return fmt.Sprintf("%v.*", scope.QuotedTableName()) - } - return "*" - } - return scope.buildSelectQuery(scope.Search.selects) -} - -func (scope *Scope) orderSQL() string { - if len(scope.Search.orders) == 0 || scope.Search.ignoreOrderQuery { - return "" - } - - var orders []string - for _, order := range scope.Search.orders { - if str, ok := order.(string); ok { - orders = append(orders, scope.quoteIfPossible(str)) - } else if expr, ok := order.(*SqlExpr); ok { - exp := expr.expr - for _, arg := range expr.args { - exp = strings.Replace(exp, "?", scope.AddToVars(arg), 
1) - } - orders = append(orders, exp) - } - } - return " ORDER BY " + strings.Join(orders, ",") -} - -func (scope *Scope) limitAndOffsetSQL() string { - sql, err := scope.Dialect().LimitAndOffsetSQL(scope.Search.limit, scope.Search.offset) - scope.Err(err) - return sql -} - -func (scope *Scope) groupSQL() string { - if len(scope.Search.group) == 0 { - return "" - } - return " GROUP BY " + scope.Search.group -} - -func (scope *Scope) havingSQL() string { - if len(scope.Search.havingConditions) == 0 { - return "" - } - - var andConditions []string - for _, clause := range scope.Search.havingConditions { - if sql := scope.buildCondition(clause, true); sql != "" { - andConditions = append(andConditions, sql) - } - } - - combinedSQL := strings.Join(andConditions, " AND ") - if len(combinedSQL) == 0 { - return "" - } - - return " HAVING " + combinedSQL -} - -func (scope *Scope) joinsSQL() string { - var joinConditions []string - for _, clause := range scope.Search.joinConditions { - if sql := scope.buildCondition(clause, true); sql != "" { - joinConditions = append(joinConditions, strings.TrimSuffix(strings.TrimPrefix(sql, "("), ")")) - } - } - - return strings.Join(joinConditions, " ") + " " -} - -func (scope *Scope) prepareQuerySQL() { - if scope.Search.raw { - scope.Raw(scope.CombinedConditionSql()) - } else { - scope.Raw(fmt.Sprintf("SELECT %v FROM %v %v", scope.selectSQL(), scope.QuotedTableName(), scope.CombinedConditionSql())) - } - return -} - -func (scope *Scope) inlineCondition(values ...interface{}) *Scope { - if len(values) > 0 { - scope.Search.Where(values[0], values[1:]...) - } - return scope -} - -func (scope *Scope) callCallbacks(funcs []*func(s *Scope)) *Scope { - defer func() { - if err := recover(); err != nil { - if db, ok := scope.db.db.(sqlTx); ok { - db.Rollback() - } - panic(err) - } - }() - for _, f := range funcs { - (*f)(scope) - if scope.skipLeft { - break - } - } - return scope -} - -func convertInterfaceToMap(values interface{}, withIgnoredField bool, db *DB) map[string]interface{} { - var attrs = map[string]interface{}{} - - switch value := values.(type) { - case map[string]interface{}: - return value - case []interface{}: - for _, v := range value { - for key, value := range convertInterfaceToMap(v, withIgnoredField, db) { - attrs[key] = value - } - } - case interface{}: - reflectValue := reflect.ValueOf(values) - - switch reflectValue.Kind() { - case reflect.Map: - for _, key := range reflectValue.MapKeys() { - attrs[ToColumnName(key.Interface().(string))] = reflectValue.MapIndex(key).Interface() - } - default: - for _, field := range (&Scope{Value: values, db: db}).Fields() { - if !field.IsBlank && (withIgnoredField || !field.IsIgnored) { - attrs[field.DBName] = field.Field.Interface() - } - } - } - } - return attrs -} - -func (scope *Scope) updatedAttrsWithValues(value interface{}) (results map[string]interface{}, hasUpdate bool) { - if scope.IndirectValue().Kind() != reflect.Struct { - return convertInterfaceToMap(value, false, scope.db), true - } - - results = map[string]interface{}{} - - for key, value := range convertInterfaceToMap(value, true, scope.db) { - if field, ok := scope.FieldByName(key); ok && scope.changeableField(field) { - if _, ok := value.(*SqlExpr); ok { - hasUpdate = true - results[field.DBName] = value - } else { - err := field.Set(value) - if field.IsNormal && !field.IsIgnored { - hasUpdate = true - if err == ErrUnaddressable { - results[field.DBName] = value - } else { - results[field.DBName] = field.Field.Interface() - } - } - } - } - } 
-	return
-}
-
-func (scope *Scope) row() *sql.Row {
-	defer scope.trace(NowFunc())
-
-	result := &RowQueryResult{}
-	scope.InstanceSet("row_query_result", result)
-	scope.callCallbacks(scope.db.parent.callbacks.rowQueries)
-
-	return result.Row
-}
-
-func (scope *Scope) rows() (*sql.Rows, error) {
-	defer scope.trace(NowFunc())
-
-	result := &RowsQueryResult{}
-	scope.InstanceSet("row_query_result", result)
-	scope.callCallbacks(scope.db.parent.callbacks.rowQueries)
-
-	return result.Rows, result.Error
-}
-
-func (scope *Scope) initialize() *Scope {
-	for _, clause := range scope.Search.whereConditions {
-		scope.updatedAttrsWithValues(clause["query"])
-	}
-	scope.updatedAttrsWithValues(scope.Search.initAttrs)
-	scope.updatedAttrsWithValues(scope.Search.assignAttrs)
-	return scope
-}
-
-func (scope *Scope) isQueryForColumn(query interface{}, column string) bool {
-	queryStr := strings.ToLower(fmt.Sprint(query))
-	if queryStr == column {
-		return true
-	}
-
-	if strings.HasSuffix(queryStr, "as "+column) {
-		return true
-	}
-
-	if strings.HasSuffix(queryStr, "as "+scope.Quote(column)) {
-		return true
-	}
-
-	return false
-}
-
-func (scope *Scope) pluck(column string, value interface{}) *Scope {
-	dest := reflect.Indirect(reflect.ValueOf(value))
-	if dest.Kind() != reflect.Slice {
-		scope.Err(fmt.Errorf("results should be a slice, not %s", dest.Kind()))
-		return scope
-	}
-
-	if dest.Len() > 0 {
-		dest.Set(reflect.Zero(dest.Type()))
-	}
-
-	if query, ok := scope.Search.selects["query"]; !ok || !scope.isQueryForColumn(query, column) {
-		scope.Search.Select(column)
-	}
-
-	rows, err := scope.rows()
-	if scope.Err(err) == nil {
-		defer rows.Close()
-		for rows.Next() {
-			elem := reflect.New(dest.Type().Elem()).Interface()
-			scope.Err(rows.Scan(elem))
-			dest.Set(reflect.Append(dest, reflect.ValueOf(elem).Elem()))
-		}
-
-		if err := rows.Err(); err != nil {
-			scope.Err(err)
-		}
-	}
-	return scope
-}
-
-func (scope *Scope) count(value interface{}) *Scope {
-	if query, ok := scope.Search.selects["query"]; !ok || !countingQueryRegexp.MatchString(fmt.Sprint(query)) {
-		if len(scope.Search.group) != 0 {
-			if len(scope.Search.havingConditions) != 0 {
-				scope.prepareQuerySQL()
-				scope.Search = &search{}
-				scope.Search.Select("count(*)")
-				scope.Search.Table(fmt.Sprintf("( %s ) AS count_table", scope.SQL))
-			} else {
-				scope.Search.Select("count(*) FROM ( SELECT count(*) as name ")
-				scope.Search.group += " ) AS count_table"
-			}
-		} else {
-			scope.Search.Select("count(*)")
-		}
-	}
-	scope.Search.ignoreOrderQuery = true
-	scope.Err(scope.row().Scan(value))
-	return scope
-}
-
-func (scope *Scope) typeName() string {
-	typ := scope.IndirectValue().Type()
-
-	for typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr {
-		typ = typ.Elem()
-	}
-
-	return typ.Name()
-}
-
-// trace print sql log
-func (scope *Scope) trace(t time.Time) {
-	if len(scope.SQL) > 0 {
-		scope.db.slog(scope.SQL, t, scope.SQLVars...)
-	}
-}
-
-func (scope *Scope) changeableField(field *Field) bool {
-	if selectAttrs := scope.SelectAttrs(); len(selectAttrs) > 0 {
-		for _, attr := range selectAttrs {
-			if field.Name == attr || field.DBName == attr {
-				return true
-			}
-		}
-		return false
-	}
-
-	for _, attr := range scope.OmitAttrs() {
-		if field.Name == attr || field.DBName == attr {
-			return false
-		}
-	}
-
-	return true
-}
-
-func (scope *Scope) related(value interface{}, foreignKeys ...string) *Scope {
-	toScope := scope.db.NewScope(value)
-	tx := scope.db.Set("gorm:association:source", scope.Value)
-
-	for _, foreignKey := range append(foreignKeys, toScope.typeName()+"Id", scope.typeName()+"Id") {
-		fromField, _ := scope.FieldByName(foreignKey)
-		toField, _ := toScope.FieldByName(foreignKey)
-
-		if fromField != nil {
-			if relationship := fromField.Relationship; relationship != nil {
-				if relationship.Kind == "many_to_many" {
-					joinTableHandler := relationship.JoinTableHandler
-					scope.Err(joinTableHandler.JoinWith(joinTableHandler, tx, scope.Value).Find(value).Error)
-				} else if relationship.Kind == "belongs_to" {
-					for idx, foreignKey := range relationship.ForeignDBNames {
-						if field, ok := scope.FieldByName(foreignKey); ok {
-							tx = tx.Where(fmt.Sprintf("%v = ?", scope.Quote(relationship.AssociationForeignDBNames[idx])), field.Field.Interface())
-						}
-					}
-					scope.Err(tx.Find(value).Error)
-				} else if relationship.Kind == "has_many" || relationship.Kind == "has_one" {
-					for idx, foreignKey := range relationship.ForeignDBNames {
-						if field, ok := scope.FieldByName(relationship.AssociationForeignDBNames[idx]); ok {
-							tx = tx.Where(fmt.Sprintf("%v = ?", scope.Quote(foreignKey)), field.Field.Interface())
-						}
-					}
-
-					if relationship.PolymorphicType != "" {
-						tx = tx.Where(fmt.Sprintf("%v = ?", scope.Quote(relationship.PolymorphicDBName)), relationship.PolymorphicValue)
-					}
-					scope.Err(tx.Find(value).Error)
-				}
-			} else {
-				sql := fmt.Sprintf("%v = ?", scope.Quote(toScope.PrimaryKey()))
-				scope.Err(tx.Where(sql, fromField.Field.Interface()).Find(value).Error)
-			}
-			return scope
-		} else if toField != nil {
-			sql := fmt.Sprintf("%v = ?", scope.Quote(toField.DBName))
-			scope.Err(tx.Where(sql, scope.PrimaryKeyValue()).Find(value).Error)
-			return scope
-		}
-	}
-
-	scope.Err(fmt.Errorf("invalid association %v", foreignKeys))
-	return scope
-}
-
-// getTableOptions return the table options string or an empty string if the table options does not exist
-func (scope *Scope) getTableOptions() string {
-	tableOptions, ok := scope.Get("gorm:table_options")
-	if !ok {
-		return ""
-	}
-	return " " + tableOptions.(string)
-}
-
-func (scope *Scope) createJoinTable(field *StructField) {
-	if relationship := field.Relationship; relationship != nil && relationship.JoinTableHandler != nil {
-		joinTableHandler := relationship.JoinTableHandler
-		joinTable := joinTableHandler.Table(scope.db)
-		if !scope.Dialect().HasTable(joinTable) {
-			toScope := &Scope{Value: reflect.New(field.Struct.Type).Interface()}
-
-			var sqlTypes, primaryKeys []string
-			for idx, fieldName := range relationship.ForeignFieldNames {
-				if field, ok := scope.FieldByName(fieldName); ok {
-					foreignKeyStruct := field.clone()
-					foreignKeyStruct.IsPrimaryKey = false
-					foreignKeyStruct.TagSettingsSet("IS_JOINTABLE_FOREIGNKEY", "true")
-					foreignKeyStruct.TagSettingsDelete("AUTO_INCREMENT")
-					sqlTypes = append(sqlTypes, scope.Quote(relationship.ForeignDBNames[idx])+" "+scope.Dialect().DataTypeOf(foreignKeyStruct))
-					primaryKeys = append(primaryKeys, scope.Quote(relationship.ForeignDBNames[idx]))
-				}
-			}
-
-			for idx, fieldName := range relationship.AssociationForeignFieldNames {
-				if field, ok := toScope.FieldByName(fieldName); ok {
-					foreignKeyStruct := field.clone()
-					foreignKeyStruct.IsPrimaryKey = false
-					foreignKeyStruct.TagSettingsSet("IS_JOINTABLE_FOREIGNKEY", "true")
-					foreignKeyStruct.TagSettingsDelete("AUTO_INCREMENT")
-					sqlTypes = append(sqlTypes, scope.Quote(relationship.AssociationForeignDBNames[idx])+" "+scope.Dialect().DataTypeOf(foreignKeyStruct))
-					primaryKeys = append(primaryKeys, scope.Quote(relationship.AssociationForeignDBNames[idx]))
-				}
-			}
-
-			scope.Err(scope.NewDB().Exec(fmt.Sprintf("CREATE TABLE %v (%v, PRIMARY KEY (%v))%s", scope.Quote(joinTable), strings.Join(sqlTypes, ","), strings.Join(primaryKeys, ","), scope.getTableOptions())).Error)
-		}
-		scope.NewDB().Table(joinTable).AutoMigrate(joinTableHandler)
-	}
-}
-
-func (scope *Scope) createTable() *Scope {
-	var tags []string
-	var primaryKeys []string
-	var primaryKeyInColumnType = false
-	for _, field := range scope.GetModelStruct().StructFields {
-		if field.IsNormal {
-			sqlTag := scope.Dialect().DataTypeOf(field)
-
-			// Check if the primary key constraint was specified as
-			// part of the column type. If so, we can only support
-			// one column as the primary key.
-			if strings.Contains(strings.ToLower(sqlTag), "primary key") {
-				primaryKeyInColumnType = true
-			}
-
-			tags = append(tags, scope.Quote(field.DBName)+" "+sqlTag)
-		}
-
-		if field.IsPrimaryKey {
-			primaryKeys = append(primaryKeys, scope.Quote(field.DBName))
-		}
-		scope.createJoinTable(field)
-	}
-
-	var primaryKeyStr string
-	if len(primaryKeys) > 0 && !primaryKeyInColumnType {
-		primaryKeyStr = fmt.Sprintf(", PRIMARY KEY (%v)", strings.Join(primaryKeys, ","))
-	}
-
-	scope.Raw(fmt.Sprintf("CREATE TABLE %v (%v %v)%s", scope.QuotedTableName(), strings.Join(tags, ","), primaryKeyStr, scope.getTableOptions())).Exec()
-
-	scope.autoIndex()
-	return scope
-}
-
-func (scope *Scope) dropTable() *Scope {
-	scope.Raw(fmt.Sprintf("DROP TABLE %v", scope.QuotedTableName())).Exec()
-	return scope
-}
-
-func (scope *Scope) modifyColumn(column string, typ string) {
-	scope.db.AddError(scope.Dialect().ModifyColumn(scope.QuotedTableName(), scope.Quote(column), typ))
-}
-
-func (scope *Scope) dropColumn(column string) {
-	scope.Raw(fmt.Sprintf("ALTER TABLE %v DROP COLUMN %v", scope.QuotedTableName(), scope.Quote(column))).Exec()
-}
-
-func (scope *Scope) addIndex(unique bool, indexName string, column ...string) {
-	if scope.Dialect().HasIndex(scope.TableName(), indexName) {
-		return
-	}
-
-	var columns []string
-	for _, name := range column {
-		columns = append(columns, scope.quoteIfPossible(name))
-	}
-
-	sqlCreate := "CREATE INDEX"
-	if unique {
-		sqlCreate = "CREATE UNIQUE INDEX"
-	}
-
-	scope.Raw(fmt.Sprintf("%s %v ON %v(%v) %v", sqlCreate, indexName, scope.QuotedTableName(), strings.Join(columns, ", "), scope.whereSQL())).Exec()
-}
-
-func (scope *Scope) addForeignKey(field string, dest string, onDelete string, onUpdate string) {
-	// Compatible with old generated key
-	keyName := scope.Dialect().BuildKeyName(scope.TableName(), field, dest, "foreign")
-
-	if scope.Dialect().HasForeignKey(scope.TableName(), keyName) {
-		return
-	}
-	var query = `ALTER TABLE %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s ON DELETE %s ON UPDATE %s;`
-	scope.Raw(fmt.Sprintf(query, scope.QuotedTableName(), scope.quoteIfPossible(keyName), scope.quoteIfPossible(field), dest, onDelete, onUpdate)).Exec()
-}
-
-func (scope *Scope) removeForeignKey(field string, dest string) {
-	keyName := scope.Dialect().BuildKeyName(scope.TableName(), field, dest, "foreign")
-	if !scope.Dialect().HasForeignKey(scope.TableName(), keyName) {
-		return
-	}
-	var mysql mysql
-	var query string
-	if scope.Dialect().GetName() == mysql.GetName() {
-		query = `ALTER TABLE %s DROP FOREIGN KEY %s;`
-	} else {
-		query = `ALTER TABLE %s DROP CONSTRAINT %s;`
-	}
-
-	scope.Raw(fmt.Sprintf(query, scope.QuotedTableName(), scope.quoteIfPossible(keyName))).Exec()
-}
-
-func (scope *Scope) removeIndex(indexName string) {
-	scope.Dialect().RemoveIndex(scope.TableName(), indexName)
-}
-
-func (scope *Scope) autoMigrate() *Scope {
-	tableName := scope.TableName()
-	quotedTableName := scope.QuotedTableName()
-
-	if !scope.Dialect().HasTable(tableName) {
-		scope.createTable()
-	} else {
-		for _, field := range scope.GetModelStruct().StructFields {
-			if !scope.Dialect().HasColumn(tableName, field.DBName) {
-				if field.IsNormal {
-					sqlTag := scope.Dialect().DataTypeOf(field)
-					scope.Raw(fmt.Sprintf("ALTER TABLE %v ADD %v %v;", quotedTableName, scope.Quote(field.DBName), sqlTag)).Exec()
-				}
-			}
-			scope.createJoinTable(field)
-		}
-		scope.autoIndex()
-	}
-	return scope
-}
-
-func (scope *Scope) autoIndex() *Scope {
-	var indexes = map[string][]string{}
-	var uniqueIndexes = map[string][]string{}
-
-	for _, field := range scope.GetStructFields() {
-		if name, ok := field.TagSettingsGet("INDEX"); ok {
-			names := strings.Split(name, ",")
-
-			for _, name := range names {
-				if name == "INDEX" || name == "" {
-					name = scope.Dialect().BuildKeyName("idx", scope.TableName(), field.DBName)
-				}
-				name, column := scope.Dialect().NormalizeIndexAndColumn(name, field.DBName)
-				indexes[name] = append(indexes[name], column)
-			}
-		}
-
-		if name, ok := field.TagSettingsGet("UNIQUE_INDEX"); ok {
-			names := strings.Split(name, ",")
-
-			for _, name := range names {
-				if name == "UNIQUE_INDEX" || name == "" {
-					name = scope.Dialect().BuildKeyName("uix", scope.TableName(), field.DBName)
-				}
-				name, column := scope.Dialect().NormalizeIndexAndColumn(name, field.DBName)
-				uniqueIndexes[name] = append(uniqueIndexes[name], column)
-			}
-		}
-	}
-
-	for name, columns := range indexes {
-		if db := scope.NewDB().Table(scope.TableName()).Model(scope.Value).AddIndex(name, columns...); db.Error != nil {
-			scope.db.AddError(db.Error)
-		}
-	}
-
-	for name, columns := range uniqueIndexes {
-		if db := scope.NewDB().Table(scope.TableName()).Model(scope.Value).AddUniqueIndex(name, columns...); db.Error != nil {
-			scope.db.AddError(db.Error)
-		}
-	}
-
-	return scope
-}
-
-func (scope *Scope) getColumnAsArray(columns []string, values ...interface{}) (results [][]interface{}) {
-	resultMap := make(map[string][]interface{})
-	for _, value := range values {
-		indirectValue := indirect(reflect.ValueOf(value))
-
-		switch indirectValue.Kind() {
-		case reflect.Slice:
-			for i := 0; i < indirectValue.Len(); i++ {
-				var result []interface{}
-				var object = indirect(indirectValue.Index(i))
-				var hasValue = false
-				for _, column := range columns {
-					field := object.FieldByName(column)
-					if hasValue || !isBlank(field) {
-						hasValue = true
-					}
-					result = append(result, field.Interface())
-				}
-
-				if hasValue {
-					h := fmt.Sprint(result...)
-					if _, exist := resultMap[h]; !exist {
-						resultMap[h] = result
-					}
-				}
-			}
-		case reflect.Struct:
-			var result []interface{}
-			var hasValue = false
-			for _, column := range columns {
-				field := indirectValue.FieldByName(column)
-				if hasValue || !isBlank(field) {
-					hasValue = true
-				}
-				result = append(result, field.Interface())
-			}
-
-			if hasValue {
-				h := fmt.Sprint(result...)
-				if _, exist := resultMap[h]; !exist {
-					resultMap[h] = result
-				}
-			}
-		}
-	}
-	for _, v := range resultMap {
-		results = append(results, v)
-	}
-	return
-}
-
-func (scope *Scope) getColumnAsScope(column string) *Scope {
-	indirectScopeValue := scope.IndirectValue()
-
-	switch indirectScopeValue.Kind() {
-	case reflect.Slice:
-		if fieldStruct, ok := scope.GetModelStruct().ModelType.FieldByName(column); ok {
-			fieldType := fieldStruct.Type
-			if fieldType.Kind() == reflect.Slice || fieldType.Kind() == reflect.Ptr {
-				fieldType = fieldType.Elem()
-			}
-
-			resultsMap := map[interface{}]bool{}
-			results := reflect.New(reflect.SliceOf(reflect.PtrTo(fieldType))).Elem()
-
-			for i := 0; i < indirectScopeValue.Len(); i++ {
-				result := indirect(indirect(indirectScopeValue.Index(i)).FieldByName(column))
-
-				if result.Kind() == reflect.Slice {
-					for j := 0; j < result.Len(); j++ {
-						if elem := result.Index(j); elem.CanAddr() && resultsMap[elem.Addr()] != true {
-							resultsMap[elem.Addr()] = true
-							results = reflect.Append(results, elem.Addr())
-						}
-					}
-				} else if result.CanAddr() && resultsMap[result.Addr()] != true {
-					resultsMap[result.Addr()] = true
-					results = reflect.Append(results, result.Addr())
-				}
-			}
-			return scope.New(results.Interface())
-		}
-	case reflect.Struct:
-		if field := indirectScopeValue.FieldByName(column); field.CanAddr() {
-			return scope.New(field.Addr().Interface())
-		}
-	}
-	return nil
-}
-
-func (scope *Scope) hasConditions() bool {
-	return !scope.PrimaryKeyZero() ||
-		len(scope.Search.whereConditions) > 0 ||
-		len(scope.Search.orConditions) > 0 ||
-		len(scope.Search.notConditions) > 0
-}
diff --git a/vendor/github.com/jinzhu/gorm/search.go b/vendor/github.com/jinzhu/gorm/search.go
deleted file mode 100644
index 7c4cc18..0000000
--- a/vendor/github.com/jinzhu/gorm/search.go
+++ /dev/null
@@ -1,153 +0,0 @@
-package gorm
-
-import (
-	"fmt"
-)
-
-type search struct {
-	db               *DB
-	whereConditions  []map[string]interface{}
-	orConditions     []map[string]interface{}
-	notConditions    []map[string]interface{}
-	havingConditions []map[string]interface{}
-	joinConditions   []map[string]interface{}
-	initAttrs        []interface{}
-	assignAttrs      []interface{}
-	selects          map[string]interface{}
-	omits            []string
-	orders           []interface{}
-	preload          []searchPreload
-	offset           interface{}
-	limit            interface{}
-	group            string
-	tableName        string
-	raw              bool
-	Unscoped         bool
-	ignoreOrderQuery bool
-}
-
-type searchPreload struct {
-	schema     string
-	conditions []interface{}
-}
-
-func (s *search) clone() *search {
-	clone := *s
-	return &clone
-}
-
-func (s *search) Where(query interface{}, values ...interface{}) *search {
-	s.whereConditions = append(s.whereConditions, map[string]interface{}{"query": query, "args": values})
-	return s
-}
-
-func (s *search) Not(query interface{}, values ...interface{}) *search {
-	s.notConditions = append(s.notConditions, map[string]interface{}{"query": query, "args": values})
-	return s
-}
-
-func (s *search) Or(query interface{}, values ...interface{}) *search {
-	s.orConditions = append(s.orConditions, map[string]interface{}{"query": query, "args": values})
-	return s
-}
-
-func (s *search) Attrs(attrs ...interface{}) *search {
-	s.initAttrs = append(s.initAttrs, toSearchableMap(attrs...))
-	return s
-}
-
-func (s *search) Assign(attrs ...interface{}) *search {
-	s.assignAttrs = append(s.assignAttrs, toSearchableMap(attrs...))
-	return s
-}
-
-func (s *search) Order(value interface{}, reorder ...bool) *search {
-	if len(reorder) > 0 && reorder[0] {
-		s.orders = []interface{}{}
-	}
-
-	if value != nil && value != "" {
-		s.orders = append(s.orders, value)
-	}
-	return s
-}
-
-func (s *search) Select(query interface{}, args ...interface{}) *search {
-	s.selects = map[string]interface{}{"query": query, "args": args}
-	return s
-}
-
-func (s *search) Omit(columns ...string) *search {
-	s.omits = columns
-	return s
-}
-
-func (s *search) Limit(limit interface{}) *search {
-	s.limit = limit
-	return s
-}
-
-func (s *search) Offset(offset interface{}) *search {
-	s.offset = offset
-	return s
-}
-
-func (s *search) Group(query string) *search {
-	s.group = s.getInterfaceAsSQL(query)
-	return s
-}
-
-func (s *search) Having(query interface{}, values ...interface{}) *search {
-	if val, ok := query.(*SqlExpr); ok {
-		s.havingConditions = append(s.havingConditions, map[string]interface{}{"query": val.expr, "args": val.args})
-	} else {
-		s.havingConditions = append(s.havingConditions, map[string]interface{}{"query": query, "args": values})
-	}
-	return s
-}
-
-func (s *search) Joins(query string, values ...interface{}) *search {
-	s.joinConditions = append(s.joinConditions, map[string]interface{}{"query": query, "args": values})
-	return s
-}
-
-func (s *search) Preload(schema string, values ...interface{}) *search {
-	var preloads []searchPreload
-	for _, preload := range s.preload {
-		if preload.schema != schema {
-			preloads = append(preloads, preload)
-		}
-	}
-	preloads = append(preloads, searchPreload{schema, values})
-	s.preload = preloads
-	return s
-}
-
-func (s *search) Raw(b bool) *search {
-	s.raw = b
-	return s
-}
-
-func (s *search) unscoped() *search {
-	s.Unscoped = true
-	return s
-}
-
-func (s *search) Table(name string) *search {
-	s.tableName = name
-	return s
-}
-
-func (s *search) getInterfaceAsSQL(value interface{}) (str string) {
-	switch value.(type) {
-	case string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
-		str = fmt.Sprintf("%v", value)
-	default:
-		s.db.AddError(ErrInvalidSQL)
-	}
-
-	if str == "-1" {
-		return ""
-	}
-	return
-}
diff --git a/vendor/github.com/jinzhu/gorm/test_all.sh b/vendor/github.com/jinzhu/gorm/test_all.sh
deleted file mode 100644
index 5cfb332..0000000
--- a/vendor/github.com/jinzhu/gorm/test_all.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-dialects=("postgres" "mysql" "mssql" "sqlite")
-
-for dialect in "${dialects[@]}" ; do
-  DEBUG=false GORM_DIALECT=${dialect} go test
-done
diff --git a/vendor/github.com/jinzhu/gorm/utils.go b/vendor/github.com/jinzhu/gorm/utils.go
deleted file mode 100644
index d2ae946..0000000
--- a/vendor/github.com/jinzhu/gorm/utils.go
+++ /dev/null
@@ -1,226 +0,0 @@
-package gorm
-
-import (
-	"database/sql/driver"
-	"fmt"
-	"reflect"
-	"regexp"
-	"runtime"
-	"strings"
-	"sync"
-	"time"
-)
-
-// NowFunc returns current time, this function is exported in order to be able
-// to give the flexibility to the developer to customize it according to their
-// needs, e.g:
-//    gorm.NowFunc = func() time.Time {
-//      return time.Now().UTC()
-//    }
-var NowFunc = func() time.Time {
-	return time.Now()
-}
-
-// Copied from golint
-var commonInitialisms = []string{"API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "LHS", "QPS", "RAM", "RHS", "RPC", "SLA", "SMTP", "SSH", "TLS", "TTL", "UID", "UI", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XSRF", "XSS"}
-var commonInitialismsReplacer *strings.Replacer
-
-var goSrcRegexp = regexp.MustCompile(`jinzhu/gorm(@.*)?/.*.go`)
-var goTestRegexp = regexp.MustCompile(`jinzhu/gorm(@.*)?/.*test.go`)
-
-func init() {
-	var commonInitialismsForReplacer []string
-	for _, initialism := range commonInitialisms {
-		commonInitialismsForReplacer = append(commonInitialismsForReplacer, initialism, strings.Title(strings.ToLower(initialism)))
-	}
-	commonInitialismsReplacer = strings.NewReplacer(commonInitialismsForReplacer...)
-}
-
-type safeMap struct {
-	m map[string]string
-	l *sync.RWMutex
-}
-
-func (s *safeMap) Set(key string, value string) {
-	s.l.Lock()
-	defer s.l.Unlock()
-	s.m[key] = value
-}
-
-func (s *safeMap) Get(key string) string {
-	s.l.RLock()
-	defer s.l.RUnlock()
-	return s.m[key]
-}
-
-func newSafeMap() *safeMap {
-	return &safeMap{l: new(sync.RWMutex), m: make(map[string]string)}
-}
-
-// SQL expression
-type SqlExpr struct {
-	expr string
-	args []interface{}
-}
-
-// Expr generate raw SQL expression, for example:
-//     DB.Model(&product).Update("price", gorm.Expr("price * ? + ?", 2, 100))
-func Expr(expression string, args ...interface{}) *SqlExpr {
-	return &SqlExpr{expr: expression, args: args}
-}
-
-func indirect(reflectValue reflect.Value) reflect.Value {
-	for reflectValue.Kind() == reflect.Ptr {
-		reflectValue = reflectValue.Elem()
-	}
-	return reflectValue
-}
-
-func toQueryMarks(primaryValues [][]interface{}) string {
-	var results []string
-
-	for _, primaryValue := range primaryValues {
-		var marks []string
-		for range primaryValue {
-			marks = append(marks, "?")
-		}
-
-		if len(marks) > 1 {
-			results = append(results, fmt.Sprintf("(%v)", strings.Join(marks, ",")))
-		} else {
-			results = append(results, strings.Join(marks, ""))
-		}
-	}
-	return strings.Join(results, ",")
-}
-
-func toQueryCondition(scope *Scope, columns []string) string {
-	var newColumns []string
-	for _, column := range columns {
-		newColumns = append(newColumns, scope.Quote(column))
-	}
-
-	if len(columns) > 1 {
-		return fmt.Sprintf("(%v)", strings.Join(newColumns, ","))
-	}
-	return strings.Join(newColumns, ",")
-}
-
-func toQueryValues(values [][]interface{}) (results []interface{}) {
-	for _, value := range values {
-		for _, v := range value {
-			results = append(results, v)
-		}
-	}
-	return
-}
-
-func fileWithLineNum() string {
-	for i := 2; i < 15; i++ {
-		_, file, line, ok := runtime.Caller(i)
-		if ok && (!goSrcRegexp.MatchString(file) || goTestRegexp.MatchString(file)) {
-			return fmt.Sprintf("%v:%v", file, line)
-		}
-	}
-	return ""
-}
-
-func isBlank(value reflect.Value) bool {
-	switch value.Kind() {
-	case reflect.String:
-		return value.Len() == 0
-	case reflect.Bool:
-		return !value.Bool()
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return value.Int() == 0
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return value.Uint() == 0
-	case reflect.Float32, reflect.Float64:
-		return value.Float() == 0
-	case reflect.Interface, reflect.Ptr:
-		return value.IsNil()
-	}
-
-	return reflect.DeepEqual(value.Interface(), reflect.Zero(value.Type()).Interface())
-}
-
-func toSearchableMap(attrs ...interface{}) (result interface{}) {
-	if len(attrs) > 1 {
-		if str, ok := attrs[0].(string); ok {
-			result = map[string]interface{}{str: attrs[1]}
-		}
-	} else if len(attrs) == 1 {
-		if attr, ok := attrs[0].(map[string]interface{}); ok {
-			result = attr
-		}
-
-		if attr, ok := attrs[0].(interface{}); ok {
-			result = attr
-		}
-	}
-	return
-}
-
-func equalAsString(a interface{}, b interface{}) bool {
-	return toString(a) == toString(b)
-}
-
-func toString(str interface{}) string {
-	if values, ok := str.([]interface{}); ok {
-		var results []string
-		for _, value := range values {
-			results = append(results, toString(value))
-		}
-		return strings.Join(results, "_")
-	} else if bytes, ok := str.([]byte); ok {
-		return string(bytes)
-	} else if reflectValue := reflect.Indirect(reflect.ValueOf(str)); reflectValue.IsValid() {
-		return fmt.Sprintf("%v", reflectValue.Interface())
-	}
-	return ""
-}
-
-func makeSlice(elemType reflect.Type) interface{} {
-	if elemType.Kind() == reflect.Slice {
-		elemType = elemType.Elem()
-	}
-	sliceType := reflect.SliceOf(elemType)
-	slice := reflect.New(sliceType)
-	slice.Elem().Set(reflect.MakeSlice(sliceType, 0, 0))
-	return slice.Interface()
-}
-
-func strInSlice(a string, list []string) bool {
-	for _, b := range list {
-		if b == a {
-			return true
-		}
-	}
-	return false
-}
-
-// getValueFromFields return given fields's value
-func getValueFromFields(value reflect.Value, fieldNames []string) (results []interface{}) {
-	// If value is a nil pointer, Indirect returns a zero Value!
-	// Therefor we need to check for a zero value,
-	// as FieldByName could panic
-	if indirectValue := reflect.Indirect(value); indirectValue.IsValid() {
-		for _, fieldName := range fieldNames {
-			if fieldValue := reflect.Indirect(indirectValue.FieldByName(fieldName)); fieldValue.IsValid() {
-				result := fieldValue.Interface()
-				if r, ok := result.(driver.Valuer); ok {
-					result, _ = r.Value()
-				}
-				results = append(results, result)
-			}
-		}
-	}
-	return
-}
-
-func addExtraSpaceIfExist(str string) string {
-	if str != "" {
-		return " " + str
-	}
-	return ""
-}
diff --git a/vendor/github.com/jinzhu/gorm/wercker.yml b/vendor/github.com/jinzhu/gorm/wercker.yml
deleted file mode 100644
index c74fa4d..0000000
--- a/vendor/github.com/jinzhu/gorm/wercker.yml
+++ /dev/null
@@ -1,154 +0,0 @@
-# use the default golang container from Docker Hub
-box: golang
-
-services:
-  - name: mariadb
-    id: mariadb:latest
-    env:
-      MYSQL_DATABASE: gorm
-      MYSQL_USER: gorm
-      MYSQL_PASSWORD: gorm
-      MYSQL_RANDOM_ROOT_PASSWORD: "yes"
-  - name: mysql
-    id: mysql:latest
-    env:
-      MYSQL_DATABASE: gorm
-      MYSQL_USER: gorm
-      MYSQL_PASSWORD: gorm
-      MYSQL_RANDOM_ROOT_PASSWORD: "yes"
-  - name: mysql57
-    id: mysql:5.7
-    env:
-      MYSQL_DATABASE: gorm
-      MYSQL_USER: gorm
-      MYSQL_PASSWORD: gorm
-      MYSQL_RANDOM_ROOT_PASSWORD: "yes"
-  - name: mysql56
-    id: mysql:5.6
-    env:
-      MYSQL_DATABASE: gorm
-      MYSQL_USER: gorm
-      MYSQL_PASSWORD: gorm
-      MYSQL_RANDOM_ROOT_PASSWORD: "yes"
-  - name: postgres
-    id: postgres:latest
-    env:
-      POSTGRES_USER: gorm
-      POSTGRES_PASSWORD: gorm
-      POSTGRES_DB: gorm
-  - name: postgres96
-    id: postgres:9.6
-    env:
-      POSTGRES_USER: gorm
-      POSTGRES_PASSWORD: gorm
-      POSTGRES_DB: gorm
-  - name: postgres95
-    id: postgres:9.5
-    env:
-      POSTGRES_USER: gorm
-      POSTGRES_PASSWORD: gorm
-      POSTGRES_DB: gorm
-  - name: postgres94
-    id: postgres:9.4
-    env:
-      POSTGRES_USER: gorm
-      POSTGRES_PASSWORD: gorm
-      POSTGRES_DB: gorm
-  - name: postgres93
-    id: postgres:9.3
-    env:
-      POSTGRES_USER: gorm
-      POSTGRES_PASSWORD: gorm
-      POSTGRES_DB: gorm
-  - name: mssql
-    id: mcmoe/mssqldocker:latest
-    env:
-      ACCEPT_EULA: Y
-      SA_PASSWORD: LoremIpsum86
-      MSSQL_DB: gorm
-      MSSQL_USER: gorm
-      MSSQL_PASSWORD: LoremIpsum86
-
-# The steps that will be executed in the build pipeline
-build:
-  # The steps that will be executed on build
-  steps:
-    # Sets the go workspace and places you package
-    # at the right place in the workspace tree
-    - setup-go-workspace
-
-    # Gets the dependencies
-    - script:
-        name: go get
-        code: |
-          cd $WERCKER_SOURCE_DIR
-          go version
-          go get -t -v ./...
-
-    # Build the project
-    - script:
-        name: go build
-        code: |
-          go build ./...
-
-    # Test the project
-    - script:
-        name: test sqlite
-        code: |
-          go test -race -v ./...
-
-    - script:
-        name: test mariadb
-        code: |
-          GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mariadb:3306)/gorm?charset=utf8&parseTime=True" go test -race ./...
-
-    - script:
-        name: test mysql
-        code: |
-          GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mysql:3306)/gorm?charset=utf8&parseTime=True" go test -race ./...
-
-    - script:
-        name: test mysql5.7
-        code: |
-          GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mysql57:3306)/gorm?charset=utf8&parseTime=True" go test -race ./...
-
-    - script:
-        name: test mysql5.6
-        code: |
-          GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mysql56:3306)/gorm?charset=utf8&parseTime=True" go test -race ./...
-
-    - script:
-        name: test postgres
-        code: |
-          GORM_DIALECT=postgres GORM_DSN="host=postgres user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test -race ./...
-
-    - script:
-        name: test postgres96
-        code: |
-          GORM_DIALECT=postgres GORM_DSN="host=postgres96 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test -race ./...
-
-    - script:
-        name: test postgres95
-        code: |
-          GORM_DIALECT=postgres GORM_DSN="host=postgres95 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test -race ./...
-
-    - script:
-        name: test postgres94
-        code: |
-          GORM_DIALECT=postgres GORM_DSN="host=postgres94 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test -race ./...
-
-    - script:
-        name: test postgres93
-        code: |
-          GORM_DIALECT=postgres GORM_DSN="host=postgres93 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test -race ./...
-
-    - script:
-        name: test mssql
-        code: |
-          GORM_DIALECT=mssql GORM_DSN="sqlserver://gorm:LoremIpsum86@mssql:1433?database=gorm" go test -race ./...
-
-    - script:
-        name: codecov
-        code: |
-          go test -race -coverprofile=coverage.txt -covermode=atomic ./...
-          bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/jinzhu/inflection/LICENSE b/vendor/github.com/jinzhu/inflection/LICENSE
deleted file mode 100644
index a1ca9a0..0000000
--- a/vendor/github.com/jinzhu/inflection/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 - Jinzhu
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/jinzhu/inflection/README.md b/vendor/github.com/jinzhu/inflection/README.md
deleted file mode 100644
index a3de336..0000000
--- a/vendor/github.com/jinzhu/inflection/README.md
+++ /dev/null
@@ -1,55 +0,0 @@
-# Inflection
-
-Inflection pluralizes and singularizes English nouns
-
-[![wercker status](https://app.wercker.com/status/f8c7432b097d1f4ce636879670be0930/s/master "wercker status")](https://app.wercker.com/project/byKey/f8c7432b097d1f4ce636879670be0930)
-
-## Basic Usage
-
-```go
-inflection.Plural("person") => "people"
-inflection.Plural("Person") => "People"
-inflection.Plural("PERSON") => "PEOPLE"
-inflection.Plural("bus") => "buses"
-inflection.Plural("BUS") => "BUSES"
-inflection.Plural("Bus") => "Buses"
-
-inflection.Singular("people") => "person"
-inflection.Singular("People") => "Person"
-inflection.Singular("PEOPLE") => "PERSON"
-inflection.Singular("buses") => "bus"
-inflection.Singular("BUSES") => "BUS"
-inflection.Singular("Buses") => "Bus"
-
-inflection.Plural("FancyPerson") => "FancyPeople"
-inflection.Singular("FancyPeople") => "FancyPerson"
-```
-
-## Register Rules
-
-Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb)
-
-If you want to register more rules, follow:
-
-```
-inflection.AddUncountable("fish")
-inflection.AddIrregular("person", "people")
-inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses"
-inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS"
-```
-
-## Contributing
-
-You can help to make the project better, check out [http://gorm.io/contribute.html](http://gorm.io/contribute.html) for things you can do.
-
-## Author
-
-**jinzhu**
-
-*
-*
-*
-
-## License
-
-Released under the [MIT License](http://www.opensource.org/licenses/MIT).
diff --git a/vendor/github.com/jinzhu/inflection/go.mod b/vendor/github.com/jinzhu/inflection/go.mod
deleted file mode 100644
index 2fb7690..0000000
--- a/vendor/github.com/jinzhu/inflection/go.mod
+++ /dev/null
@@ -1 +0,0 @@
-module github.com/jinzhu/inflection
diff --git a/vendor/github.com/jinzhu/inflection/inflections.go b/vendor/github.com/jinzhu/inflection/inflections.go
deleted file mode 100644
index 606263b..0000000
--- a/vendor/github.com/jinzhu/inflection/inflections.go
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
-Package inflection pluralizes and singularizes English nouns.
-
-	inflection.Plural("person") => "people"
-	inflection.Plural("Person") => "People"
-	inflection.Plural("PERSON") => "PEOPLE"
-
-	inflection.Singular("people") => "person"
-	inflection.Singular("People") => "Person"
-	inflection.Singular("PEOPLE") => "PERSON"
-
-	inflection.Plural("FancyPerson") => "FancydPeople"
-	inflection.Singular("FancyPeople") => "FancydPerson"
-
-Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb)
-
-If you want to register more rules, follow:
-
-	inflection.AddUncountable("fish")
-	inflection.AddIrregular("person", "people")
-	inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses"
-	inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS"
-*/
-package inflection
-
-import (
-	"regexp"
-	"strings"
-)
-
-type inflection struct {
-	regexp  *regexp.Regexp
-	replace string
-}
-
-// Regular is a regexp find replace inflection
-type Regular struct {
-	find    string
-	replace string
-}
-
-// Irregular is a hard replace inflection,
-// containing both singular and plural forms
-type Irregular struct {
-	singular string
-	plural   string
-}
-
-// RegularSlice is a slice of Regular inflections
-type RegularSlice []Regular
-
-// IrregularSlice is a slice of Irregular inflections
-type IrregularSlice []Irregular
-
-var pluralInflections = RegularSlice{
-	{"([a-z])$", "${1}s"},
-	{"s$", "s"},
-	{"^(ax|test)is$", "${1}es"},
-	{"(octop|vir)us$", "${1}i"},
-	{"(octop|vir)i$", "${1}i"},
-	{"(alias|status)$", "${1}es"},
-	{"(bu)s$", "${1}ses"},
-	{"(buffal|tomat)o$", "${1}oes"},
-	{"([ti])um$", "${1}a"},
-	{"([ti])a$", "${1}a"},
-	{"sis$", "ses"},
-	{"(?:([^f])fe|([lr])f)$", "${1}${2}ves"},
-	{"(hive)$", "${1}s"},
-	{"([^aeiouy]|qu)y$", "${1}ies"},
-	{"(x|ch|ss|sh)$", "${1}es"},
-	{"(matr|vert|ind)(?:ix|ex)$", "${1}ices"},
-	{"^(m|l)ouse$", "${1}ice"},
-	{"^(m|l)ice$", "${1}ice"},
-	{"^(ox)$", "${1}en"},
-	{"^(oxen)$", "${1}"},
-	{"(quiz)$", "${1}zes"},
-}
-
-var singularInflections = RegularSlice{
-	{"s$", ""},
-	{"(ss)$", "${1}"},
-	{"(n)ews$", "${1}ews"},
-	{"([ti])a$", "${1}um"},
-	{"((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)(sis|ses)$", "${1}sis"},
-	{"(^analy)(sis|ses)$", "${1}sis"},
-	{"([^f])ves$", "${1}fe"},
-	{"(hive)s$", "${1}"},
-	{"(tive)s$", "${1}"},
-	{"([lr])ves$", "${1}f"},
-	{"([^aeiouy]|qu)ies$", "${1}y"},
-	{"(s)eries$", "${1}eries"},
-	{"(m)ovies$", "${1}ovie"},
-	{"(c)ookies$", "${1}ookie"},
-	{"(x|ch|ss|sh)es$", "${1}"},
-	{"^(m|l)ice$", "${1}ouse"},
-	{"(bus)(es)?$", "${1}"},
-	{"(o)es$", "${1}"},
-	{"(shoe)s$", "${1}"},
-	{"(cris|test)(is|es)$", "${1}is"},
-	{"^(a)x[ie]s$", "${1}xis"},
-	{"(octop|vir)(us|i)$", "${1}us"},
-	{"(alias|status)(es)?$", "${1}"},
-	{"^(ox)en", "${1}"},
-	{"(vert|ind)ices$", "${1}ex"},
-	{"(matr)ices$", "${1}ix"},
-	{"(quiz)zes$", "${1}"},
-	{"(database)s$", "${1}"},
-}
-
-var irregularInflections = IrregularSlice{
-	{"person", "people"},
-	{"man", "men"},
-	{"child", "children"},
-	{"sex", "sexes"},
-	{"move", "moves"},
-	{"mombie", "mombies"},
-}
-
-var uncountableInflections = []string{"equipment", "information", "rice", "money", "species", "series", "fish", "sheep", "jeans", "police"}
-
-var compiledPluralMaps []inflection
-var compiledSingularMaps []inflection
-
-func compile() {
-	compiledPluralMaps = []inflection{}
-	compiledSingularMaps = []inflection{}
-	for _, uncountable := range uncountableInflections {
-		inf := inflection{
-			regexp:  regexp.MustCompile("^(?i)(" + uncountable + ")$"),
-			replace: "${1}",
-		}
-		compiledPluralMaps = append(compiledPluralMaps, inf)
-		compiledSingularMaps = append(compiledSingularMaps, inf)
-	}
-
-	for _, value := range irregularInflections {
-		infs := []inflection{
-			inflection{regexp: regexp.MustCompile(strings.ToUpper(value.singular) + "$"), replace: strings.ToUpper(value.plural)},
-			inflection{regexp: regexp.MustCompile(strings.Title(value.singular) + "$"), replace: strings.Title(value.plural)},
-			inflection{regexp: regexp.MustCompile(value.singular + "$"), replace: value.plural},
-		}
-		compiledPluralMaps = append(compiledPluralMaps, infs...)
-	}
-
-	for _, value := range irregularInflections {
-		infs := []inflection{
-			inflection{regexp: regexp.MustCompile(strings.ToUpper(value.plural) + "$"), replace: strings.ToUpper(value.singular)},
-			inflection{regexp: regexp.MustCompile(strings.Title(value.plural) + "$"), replace: strings.Title(value.singular)},
-			inflection{regexp: regexp.MustCompile(value.plural + "$"), replace: value.singular},
-		}
-		compiledSingularMaps = append(compiledSingularMaps, infs...)
-	}
-
-	for i := len(pluralInflections) - 1; i >= 0; i-- {
-		value := pluralInflections[i]
-		infs := []inflection{
-			inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)},
-			inflection{regexp: regexp.MustCompile(value.find), replace: value.replace},
-			inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace},
-		}
-		compiledPluralMaps = append(compiledPluralMaps, infs...)
-	}
-
-	for i := len(singularInflections) - 1; i >= 0; i-- {
-		value := singularInflections[i]
-		infs := []inflection{
-			inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)},
-			inflection{regexp: regexp.MustCompile(value.find), replace: value.replace},
-			inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace},
-		}
-		compiledSingularMaps = append(compiledSingularMaps, infs...)
-	}
-}
-
-func init() {
-	compile()
-}
-
-// AddPlural adds a plural inflection
-func AddPlural(find, replace string) {
-	pluralInflections = append(pluralInflections, Regular{find, replace})
-	compile()
-}
-
-// AddSingular adds a singular inflection
-func AddSingular(find, replace string) {
-	singularInflections = append(singularInflections, Regular{find, replace})
-	compile()
-}
-
-// AddIrregular adds an irregular inflection
-func AddIrregular(singular, plural string) {
-	irregularInflections = append(irregularInflections, Irregular{singular, plural})
-	compile()
-}
-
-// AddUncountable adds an uncountable inflection
-func AddUncountable(values ...string) {
-	uncountableInflections = append(uncountableInflections, values...)
-	compile()
-}
-
-// GetPlural retrieves the plural inflection values
-func GetPlural() RegularSlice {
-	plurals := make(RegularSlice, len(pluralInflections))
-	copy(plurals, pluralInflections)
-	return plurals
-}
-
-// GetSingular retrieves the singular inflection values
-func GetSingular() RegularSlice {
-	singulars := make(RegularSlice, len(singularInflections))
-	copy(singulars, singularInflections)
-	return singulars
-}
-
-// GetIrregular retrieves the irregular inflection values
-func GetIrregular() IrregularSlice {
-	irregular := make(IrregularSlice, len(irregularInflections))
-	copy(irregular, irregularInflections)
-	return irregular
-}
-
-// GetUncountable retrieves the uncountable inflection values
-func GetUncountable() []string {
-	uncountables := make([]string, len(uncountableInflections))
-	copy(uncountables, uncountableInflections)
-	return uncountables
-}
-
-// SetPlural sets the plural inflections slice
-func SetPlural(inflections RegularSlice) {
-	pluralInflections = inflections
-	compile()
-}
-
-// SetSingular sets the singular inflections slice
-func SetSingular(inflections RegularSlice) {
-	singularInflections = inflections
-	compile()
-}
-
-// SetIrregular sets the irregular inflections slice
-func SetIrregular(inflections IrregularSlice) {
-	irregularInflections = inflections
-	compile()
-}
-
-// SetUncountable sets the uncountable inflections slice
-func SetUncountable(inflections []string) {
-	uncountableInflections = inflections
-	compile()
-}
-
-// Plural converts a word to its plural form
-func Plural(str string) string {
-	for _, inflection := range compiledPluralMaps {
-		if inflection.regexp.MatchString(str) {
-			return inflection.regexp.ReplaceAllString(str, inflection.replace)
-		}
-	}
-	return str
-}
-
-// Singular converts a word to its singular form
-func Singular(str string) string {
-	for _, inflection := range compiledSingularMaps {
-		if inflection.regexp.MatchString(str) {
-			return inflection.regexp.ReplaceAllString(str, inflection.replace)
-		}
-	}
-	return str
-}
diff --git a/vendor/github.com/jinzhu/inflection/wercker.yml b/vendor/github.com/jinzhu/inflection/wercker.yml
deleted file mode 100644
index 5e6ce98..0000000
--- a/vendor/github.com/jinzhu/inflection/wercker.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-box: golang
-
-build:
-  steps:
-    - setup-go-workspace
-
-    # Gets the dependencies
-    - script:
-        name: go get
-        code: |
-          go get
-
-    # Build the project
-    - script:
-        name: go build
-        code: |
-          go build ./...
-
-    # Test the project
-    - script:
-        name: go test
-        code: |
-          go test ./...
diff --git a/vendor/github.com/jmoiron/sqlx/.travis.yml b/vendor/github.com/jmoiron/sqlx/.travis.yml
deleted file mode 100644
index 6bc68d6..0000000
--- a/vendor/github.com/jmoiron/sqlx/.travis.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-# vim: ft=yaml sw=2 ts=2
-
-language: go
-
-# enable database services
-services:
-  - mysql
-  - postgresql
-
-# create test database
-before_install:
-  - mysql -e 'CREATE DATABASE IF NOT EXISTS sqlxtest;'
-  - psql -c 'create database sqlxtest;' -U postgres
-  - go get github.com/mattn/goveralls
-  - export SQLX_MYSQL_DSN="travis:@/sqlxtest?parseTime=true"
-  - export SQLX_POSTGRES_DSN="postgres://postgres:@localhost/sqlxtest?sslmode=disable"
-  - export SQLX_SQLITE_DSN="$HOME/sqlxtest.db"
-
-# go versions to test
-go:
-  - "1.8"
-  - "1.9"
-  - "1.10.x"
-
-# run tests w/ coverage
-script:
-  - travis_retry $GOPATH/bin/goveralls -service=travis-ci
diff --git a/vendor/github.com/jmoiron/sqlx/LICENSE b/vendor/github.com/jmoiron/sqlx/LICENSE
deleted file mode 100644
index 0d31edf..0000000
--- a/vendor/github.com/jmoiron/sqlx/LICENSE
+++ /dev/null
@@ -1,23 +0,0 @@
-  Copyright (c) 2013, Jason Moiron
-
-  Permission is hereby granted, free of charge, to any person
-  obtaining a copy of this software and associated documentation
-  files (the "Software"), to deal in the Software without
-  restriction, including without limitation the rights to use,
-  copy, modify, merge, publish, distribute, sublicense, and/or sell
-  copies of the Software, and to permit persons to whom the
-  Software is furnished to do so, subject to the following
-  conditions:
-
-  The above copyright notice and this permission notice shall be
-  included in all copies or substantial portions of the Software.
-
-  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-  OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-  HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-  WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-  OTHER DEALINGS IN THE SOFTWARE.
-
diff --git a/vendor/github.com/jmoiron/sqlx/README.md b/vendor/github.com/jmoiron/sqlx/README.md
deleted file mode 100644
index 8390343..0000000
--- a/vendor/github.com/jmoiron/sqlx/README.md
+++ /dev/null
@@ -1,187 +0,0 @@
-# sqlx
-
-[![Build Status](https://travis-ci.org/jmoiron/sqlx.svg?branch=master)](https://travis-ci.org/jmoiron/sqlx) [![Coverage Status](https://coveralls.io/repos/github/jmoiron/sqlx/badge.svg?branch=master)](https://coveralls.io/github/jmoiron/sqlx?branch=master) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE)
-
-sqlx is a library which provides a set of extensions on go's standard
-`database/sql` library. The sqlx versions of `sql.DB`, `sql.TX`, `sql.Stmt`,
-et al. all leave the underlying interfaces untouched, so that their interfaces
-are a superset on the standard ones. This makes it relatively painless to
-integrate existing codebases using database/sql with sqlx.
-
-Major additional concepts are:
-
-* Marshal rows into structs (with embedded struct support), maps, and slices
-* Named parameter support including prepared statements
-* `Get` and `Select` to go quickly from query to struct/slice
-
-In addition to the [godoc API documentation](http://godoc.org/github.com/jmoiron/sqlx),
-there is also some [standard documentation](http://jmoiron.github.io/sqlx/) that
-explains how to use `database/sql` along with sqlx.
-
-## Recent Changes
-
-* The [introduction](https://github.com/jmoiron/sqlx/pull/387) of `sql.ColumnType` sets the required minimum Go version to 1.8.
-
-* sqlx/types.JsonText has been renamed to JSONText to follow Go naming conventions.
-
-This breaks backwards compatibility, but it's in a way that is trivially fixable
-(`s/JsonText/JSONText/g`). The `types` package is both experimental and not in
-active development currently.
-
-* Using Go 1.6 and below with `types.JSONText` and `types.GzippedText` can be _potentially unsafe_, **especially** when used with common auto-scan sqlx idioms like `Select` and `Get`. See [golang bug #13905](https://github.com/golang/go/issues/13905).
-
-### Backwards Compatibility
-
-There is no Go1-like promise of absolute stability, but I take the issue seriously
-and will maintain the library in a compatible state unless vital bugs prevent me
-from doing so. Since [#59](https://github.com/jmoiron/sqlx/issues/59) and
-[#60](https://github.com/jmoiron/sqlx/issues/60) necessitated breaking behavior,
-a wider API cleanup was done at the time of fixing. It's possible this will happen
-in future; if it does, a git tag will be provided for users requiring the old
-behavior to continue to use it until such a time as they can migrate.
-
-## install
-
-    go get github.com/jmoiron/sqlx
-
-## issues
-
-Row headers can be ambiguous (`SELECT 1 AS a, 2 AS a`), and the result of
-`Columns()` does not fully qualify column names in queries like:
-
-```sql
-SELECT a.id, a.name, b.id, b.name FROM foos AS a JOIN foos AS b ON a.parent = b.id;
-```
-
-making a struct or map destination ambiguous. Use `AS` in your queries
-to give columns distinct names, `rows.Scan` to scan them manually, or
-`SliceScan` to get a slice of results.
-
-## usage
-
-Below is an example which shows some common use cases for sqlx. Check
-[sqlx_test.go](https://github.com/jmoiron/sqlx/blob/master/sqlx_test.go) for more
-usage.
- - -```go -package main - -import ( - "database/sql" - "fmt" - "log" - - _ "github.com/lib/pq" - "github.com/jmoiron/sqlx" -) - -var schema = ` -CREATE TABLE person ( - first_name text, - last_name text, - email text -); - -CREATE TABLE place ( - country text, - city text NULL, - telcode integer -)` - -type Person struct { - FirstName string `db:"first_name"` - LastName string `db:"last_name"` - Email string -} - -type Place struct { - Country string - City sql.NullString - TelCode int -} - -func main() { - // this Pings the database trying to connect, panics on error - // use sqlx.Open() for sql.Open() semantics - db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable") - if err != nil { - log.Fatalln(err) - } - - // exec the schema or fail; multi-statement Exec behavior varies between - // database drivers; pq will exec them all, sqlite3 won't, ymmv - db.MustExec(schema) - - tx := db.MustBegin() - tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "Jason", "Moiron", "jmoiron@jmoiron.net") - tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "John", "Doe", "johndoeDNE@gmail.net") - tx.MustExec("INSERT INTO place (country, city, telcode) VALUES ($1, $2, $3)", "United States", "New York", "1") - tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Hong Kong", "852") - tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Singapore", "65") - // Named queries can use structs, so if you have an existing struct (i.e. person := &Person{}) that you have populated, you can pass it in as &person - tx.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", &Person{"Jane", "Citizen", "jane.citzen@example.com"}) - tx.Commit() - - // Query the database, storing results in a []Person (wrapped in []interface{}) - people := []Person{} - db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC") - jason, john := people[0], people[1] - - fmt.Printf("%#v\n%#v", jason, john) - // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"} - // Person{FirstName:"John", LastName:"Doe", Email:"johndoeDNE@gmail.net"} - - // You can also get a single result, a la QueryRow - jason = Person{} - err = db.Get(&jason, "SELECT * FROM person WHERE first_name=$1", "Jason") - fmt.Printf("%#v\n", jason) - // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"} - - // if you have null fields and use SELECT *, you must use sql.Null* in your struct - places := []Place{} - err = db.Select(&places, "SELECT * FROM place ORDER BY telcode ASC") - if err != nil { - fmt.Println(err) - return - } - usa, singsing, honkers := places[0], places[1], places[2] - - fmt.Printf("%#v\n%#v\n%#v\n", usa, singsing, honkers) - // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1} - // Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65} - // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852} - - // Loop through rows using only one struct - place := Place{} - rows, err := db.Queryx("SELECT * FROM place") - for rows.Next() { - err := rows.StructScan(&place) - if err != nil { - log.Fatalln(err) - } - fmt.Printf("%#v\n", place) - } - // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1} - // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852} - // 
Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65} - - // Named queries, using `:name` as the bindvar. Automatic bindvar support - // which takes into account the dbtype based on the driverName on sqlx.Open/Connect - _, err = db.NamedExec(`INSERT INTO person (first_name,last_name,email) VALUES (:first,:last,:email)`, - map[string]interface{}{ - "first": "Bin", - "last": "Smuth", - "email": "bensmith@allblacks.nz", - }) - - // Selects Mr. Smith from the database - rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:fn`, map[string]interface{}{"fn": "Bin"}) - - // Named queries can also use structs. Their bind names follow the same rules - // as the name -> db mapping, so struct fields are lowercased and the `db` tag - // is taken into consideration. - rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:first_name`, jason) -} -``` - diff --git a/vendor/github.com/jmoiron/sqlx/bind.go b/vendor/github.com/jmoiron/sqlx/bind.go deleted file mode 100644 index 0a48252..0000000 --- a/vendor/github.com/jmoiron/sqlx/bind.go +++ /dev/null @@ -1,217 +0,0 @@ -package sqlx - -import ( - "bytes" - "database/sql/driver" - "errors" - "reflect" - "strconv" - "strings" - - "github.com/jmoiron/sqlx/reflectx" -) - -// Bindvar types supported by Rebind, BindMap and BindStruct. -const ( - UNKNOWN = iota - QUESTION - DOLLAR - NAMED - AT -) - -// BindType returns the bindtype for a given database given a drivername. -func BindType(driverName string) int { - switch driverName { - case "postgres", "pgx", "pq-timeouts", "cloudsqlpostgres": - return DOLLAR - case "mysql": - return QUESTION - case "sqlite3": - return QUESTION - case "oci8", "ora", "goracle": - return NAMED - case "sqlserver": - return AT - } - return UNKNOWN -} - -// FIXME: this should be able to be tolerant of escaped ?'s in queries without -// losing much speed, and should be to avoid confusion. - -// Rebind a query from the default bindtype (QUESTION) to the target bindtype. -func Rebind(bindType int, query string) string { - switch bindType { - case QUESTION, UNKNOWN: - return query - } - - // Add space enough for 10 params before we have to allocate - rqb := make([]byte, 0, len(query)+10) - - var i, j int - - for i = strings.Index(query, "?"); i != -1; i = strings.Index(query, "?") { - rqb = append(rqb, query[:i]...) - - switch bindType { - case DOLLAR: - rqb = append(rqb, '$') - case NAMED: - rqb = append(rqb, ':', 'a', 'r', 'g') - case AT: - rqb = append(rqb, '@', 'p') - } - - j++ - rqb = strconv.AppendInt(rqb, int64(j), 10) - - query = query[i+1:] - } - - return string(append(rqb, query...)) -} - -// Experimental implementation of Rebind which uses a bytes.Buffer. The code is -// much simpler and should be more resistant to odd unicode, but it is twice as -// slow. Kept here for benchmarking purposes and to possibly replace Rebind if -// problems arise with its somewhat naive handling of unicode. -func rebindBuff(bindType int, query string) string { - if bindType != DOLLAR { - return query - } - - b := make([]byte, 0, len(query)) - rqb := bytes.NewBuffer(b) - j := 1 - for _, r := range query { - if r == '?' { - rqb.WriteRune('$') - rqb.WriteString(strconv.Itoa(j)) - j++ - } else { - rqb.WriteRune(r) - } - } - - return rqb.String() -} - -// In expands slice values in args, returning the modified query string -// and a new arg list that can be executed by a database. The `query` should -// use the `?` bindVar. The return value uses the `?` bindVar. 
-func In(query string, args ...interface{}) (string, []interface{}, error) { - // argMeta stores reflect.Value and length for slices and - // the value itself for non-slice arguments - type argMeta struct { - v reflect.Value - i interface{} - length int - } - - var flatArgsCount int - var anySlices bool - - meta := make([]argMeta, len(args)) - - for i, arg := range args { - if a, ok := arg.(driver.Valuer); ok { - arg, _ = a.Value() - } - v := reflect.ValueOf(arg) - t := reflectx.Deref(v.Type()) - - // []byte is a driver.Value type so it should not be expanded - if t.Kind() == reflect.Slice && t != reflect.TypeOf([]byte{}) { - meta[i].length = v.Len() - meta[i].v = v - - anySlices = true - flatArgsCount += meta[i].length - - if meta[i].length == 0 { - return "", nil, errors.New("empty slice passed to 'in' query") - } - } else { - meta[i].i = arg - flatArgsCount++ - } - } - - // don't do any parsing if there aren't any slices; note that this means - // some errors that we might have caught below will not be returned. - if !anySlices { - return query, args, nil - } - - newArgs := make([]interface{}, 0, flatArgsCount) - buf := make([]byte, 0, len(query)+len(", ?")*flatArgsCount) - - var arg, offset int - - for i := strings.IndexByte(query[offset:], '?'); i != -1; i = strings.IndexByte(query[offset:], '?') { - if arg >= len(meta) { - // if an argument wasn't passed, lets return an error; this is - // not actually how database/sql Exec/Query works, but since we are - // creating an argument list programmatically, we want to be able - // to catch these programmer errors earlier. - return "", nil, errors.New("number of bindVars exceeds arguments") - } - - argMeta := meta[arg] - arg++ - - // not a slice, continue. - // our questionmark will either be written before the next expansion - // of a slice or after the loop when writing the rest of the query - if argMeta.length == 0 { - offset = offset + i + 1 - newArgs = append(newArgs, argMeta.i) - continue - } - - // write everything up to and including our ? character - buf = append(buf, query[:offset+i+1]...) - - for si := 1; si < argMeta.length; si++ { - buf = append(buf, ", ?"...) - } - - newArgs = appendReflectSlice(newArgs, argMeta.v, argMeta.length) - - // slice the query and reset the offset. this avoids some bookkeeping for - // the write after the loop - query = query[offset+i+1:] - offset = 0 - } - - buf = append(buf, query...) - - if arg < len(meta) { - return "", nil, errors.New("number of bindVars less than number arguments") - } - - return string(buf), newArgs, nil -} - -func appendReflectSlice(args []interface{}, v reflect.Value, vlen int) []interface{} { - switch val := v.Interface().(type) { - case []interface{}: - args = append(args, val...) - case []int: - for i := range val { - args = append(args, val[i]) - } - case []string: - for i := range val { - args = append(args, val[i]) - } - default: - for si := 0; si < vlen; si++ { - args = append(args, v.Index(si).Interface()) - } - } - - return args -} diff --git a/vendor/github.com/jmoiron/sqlx/doc.go b/vendor/github.com/jmoiron/sqlx/doc.go deleted file mode 100644 index e2b4e60..0000000 --- a/vendor/github.com/jmoiron/sqlx/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package sqlx provides general purpose extensions to database/sql. -// -// It is intended to seamlessly wrap database/sql and provide convenience -// methods which are useful in the development of database driven applications. -// None of the underlying database/sql methods are changed. 
Instead all extended -// behavior is implemented through new methods defined on wrapper types. -// -// Additions include scanning into structs, named query support, rebinding -// queries for different drivers, convenient shorthands for common error handling -// and more. -// -package sqlx diff --git a/vendor/github.com/jmoiron/sqlx/go.mod b/vendor/github.com/jmoiron/sqlx/go.mod deleted file mode 100644 index 66c6756..0000000 --- a/vendor/github.com/jmoiron/sqlx/go.mod +++ /dev/null @@ -1,7 +0,0 @@ -module github.com/jmoiron/sqlx - -require ( - github.com/go-sql-driver/mysql v1.4.0 - github.com/lib/pq v1.0.0 - github.com/mattn/go-sqlite3 v1.9.0 -) diff --git a/vendor/github.com/jmoiron/sqlx/go.sum b/vendor/github.com/jmoiron/sqlx/go.sum deleted file mode 100644 index a3239ad..0000000 --- a/vendor/github.com/jmoiron/sqlx/go.sum +++ /dev/null @@ -1,6 +0,0 @@ -github.com/go-sql-driver/mysql v1.4.0 h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/mattn/go-sqlite3 v1.9.0 h1:pDRiWfl+++eC2FEFRy6jXmQlvp4Yh3z1MJKg4UeYM/4= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= diff --git a/vendor/github.com/jmoiron/sqlx/named.go b/vendor/github.com/jmoiron/sqlx/named.go deleted file mode 100644 index fa82b56..0000000 --- a/vendor/github.com/jmoiron/sqlx/named.go +++ /dev/null @@ -1,356 +0,0 @@ -package sqlx - -// Named Query Support -// -// * BindMap - bind query bindvars to map/struct args -// * NamedExec, NamedQuery - named query w/ struct or map -// * NamedStmt - a pre-compiled named query which is a prepared statement -// -// Internal Interfaces: -// -// * compileNamedQuery - rebind a named query, returning a query and list of names -// * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist -// -import ( - "database/sql" - "errors" - "fmt" - "reflect" - "strconv" - "unicode" - - "github.com/jmoiron/sqlx/reflectx" -) - -// NamedStmt is a prepared statement that executes named queries. Prepare it -// how you would execute a NamedQuery, but pass in a struct or map when executing. -type NamedStmt struct { - Params []string - QueryString string - Stmt *Stmt -} - -// Close closes the named statement. -func (n *NamedStmt) Close() error { - return n.Stmt.Close() -} - -// Exec executes a named statement using the struct passed. -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) { - args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) - if err != nil { - return *new(sql.Result), err - } - return n.Stmt.Exec(args...) -} - -// Query executes a named statement using the struct argument, returning rows. -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) { - args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) - if err != nil { - return nil, err - } - return n.Stmt.Query(args...) -} - -// QueryRow executes a named statement against the database. Because sqlx cannot -// create a *sql.Row with an error condition pre-set for binding errors, sqlx -// returns a *sqlx.Row instead. -// Any named placeholder parameters are replaced with fields from arg. 
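// A hedged sketch of the NamedStmt lifecycle described above (`db`, `Person`,
// and the `person` table are assumed):
//
//	stmt, err := db.PrepareNamed("SELECT * FROM person WHERE first_name = :fn")
//	if err != nil {
//		log.Fatalln(err)
//	}
//	defer stmt.Close()
//	// Execute the prepared named query with a map (a struct works too).
//	var p Person
//	err = stmt.Get(&p, map[string]interface{}{"fn": "Jane"})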
-func (n *NamedStmt) QueryRow(arg interface{}) *Row { - args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) - if err != nil { - return &Row{err: err} - } - return n.Stmt.QueryRowx(args...) -} - -// MustExec execs a NamedStmt, panicking on error. -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) MustExec(arg interface{}) sql.Result { - res, err := n.Exec(arg) - if err != nil { - panic(err) - } - return res -} - -// Queryx using this NamedStmt -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) { - r, err := n.Query(arg) - if err != nil { - return nil, err - } - return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err -} - -// QueryRowx this NamedStmt. Because of limitations with QueryRow, this is -// an alias for QueryRow. -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) QueryRowx(arg interface{}) *Row { - return n.QueryRow(arg) -} - -// Select using this NamedStmt -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) Select(dest interface{}, arg interface{}) error { - rows, err := n.Queryx(arg) - if err != nil { - return err - } - // if something happens here, we want to make sure the rows are Closed - defer rows.Close() - return scanAll(rows, dest, false) -} - -// Get using this NamedStmt -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) Get(dest interface{}, arg interface{}) error { - r := n.QueryRowx(arg) - return r.scanAny(dest, false) -} - -// Unsafe creates an unsafe version of the NamedStmt -func (n *NamedStmt) Unsafe() *NamedStmt { - r := &NamedStmt{Params: n.Params, Stmt: n.Stmt, QueryString: n.QueryString} - r.Stmt.unsafe = true - return r -} - -// A union interface of preparer and binder, required to be able to prepare -// named statements (as the bindtype must be determined). -type namedPreparer interface { - Preparer - binder -} - -func prepareNamed(p namedPreparer, query string) (*NamedStmt, error) { - bindType := BindType(p.DriverName()) - q, args, err := compileNamedQuery([]byte(query), bindType) - if err != nil { - return nil, err - } - stmt, err := Preparex(p, q) - if err != nil { - return nil, err - } - return &NamedStmt{ - QueryString: q, - Params: args, - Stmt: stmt, - }, nil -} - -func bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) { - if maparg, ok := arg.(map[string]interface{}); ok { - return bindMapArgs(names, maparg) - } - return bindArgs(names, arg, m) -} - -// private interface to generate a list of interfaces from a given struct -// type, given a list of names to pull out of the struct. Used by public -// BindStruct interface. -func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) { - arglist := make([]interface{}, 0, len(names)) - - // grab the indirected value of arg - v := reflect.ValueOf(arg) - for v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; { - v = v.Elem() - } - - err := m.TraversalsByNameFunc(v.Type(), names, func(i int, t []int) error { - if len(t) == 0 { - return fmt.Errorf("could not find name %s in %#v", names[i], arg) - } - - val := reflectx.FieldByIndexesReadOnly(v, t) - arglist = append(arglist, val.Interface()) - - return nil - }) - - return arglist, err -} - -// like bindArgs, but for maps.
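// A worked sketch of the binding step above (values are illustrative). Given
// the names produced by compileNamedQuery and a struct argument:
//
//	names := []string{"first_name", "email"}
//	arg := Person{FirstName: "Jane", Email: "jane@example.com"}
//
// bindArgs walks arg with the mapper and returns the field values in name
// order, []interface{}{"Jane", "jane@example.com"}; bindMapArgs below does
// the same for map arguments, erroring when a key is absent.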
-func bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) { - arglist := make([]interface{}, 0, len(names)) - - for _, name := range names { - val, ok := arg[name] - if !ok { - return arglist, fmt.Errorf("could not find name %s in %#v", name, arg) - } - arglist = append(arglist, val) - } - return arglist, nil -} - -// bindStruct binds a named parameter query with fields from a struct argument. -// The rules for binding field names to parameter names follow the same -// conventions as for StructScan, including obeying the `db` struct tags. -func bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) { - bound, names, err := compileNamedQuery([]byte(query), bindType) - if err != nil { - return "", []interface{}{}, err - } - - arglist, err := bindArgs(names, arg, m) - if err != nil { - return "", []interface{}{}, err - } - - return bound, arglist, nil -} - -// bindMap binds a named parameter query with a map of arguments. -func bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) { - bound, names, err := compileNamedQuery([]byte(query), bindType) - if err != nil { - return "", []interface{}{}, err - } - - arglist, err := bindMapArgs(names, args) - return bound, arglist, err -} - -// -- Compilation of Named Queries - -// Allow digits and letters in bind params; additionally runes are -// checked against underscores, meaning that bind params can be -// alphanumeric with underscores. Mind the difference between unicode -// digits and numbers, where '5' is a digit but '五' is not. -var allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit} - -// FIXME: this function isn't safe for unicode named params, as a failing test -// can testify. This is not a regression but a failure of the original code -// as well. It should be modified to range over runes in a string rather than -// bytes, even though this is less convenient and slower. Hopefully the -// addition of the prepared NamedStmt (which will only do this once) will make -// up for the slightly slower ad-hoc NamedExec/NamedQuery. - -// compile a NamedQuery into an unbound query (using the '?' bindvar) and -// a list of names.
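// A worked sketch of the compilation (input is illustrative):
//
//	q, names, err := compileNamedQuery(
//		[]byte("INSERT INTO person (first) VALUES (:first)"), DOLLAR)
//	// q     == "INSERT INTO person (first) VALUES ($1)"
//	// names == []string{"first"}
//
// With QUESTION the bindvar becomes `?`, with NAMED it stays `:first`, and
// with AT it becomes `@p1`.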
-func compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) { - names = make([]string, 0, 10) - rebound := make([]byte, 0, len(qs)) - - inName := false - last := len(qs) - 1 - currentVar := 1 - name := make([]byte, 0, 10) - - for i, b := range qs { - // a ':' while we're in a name is an error - if b == ':' { - // if this is the second ':' in a '::' escape sequence, append a ':' - if inName && i > 0 && qs[i-1] == ':' { - rebound = append(rebound, ':') - inName = false - continue - } else if inName { - err = errors.New("unexpected `:` while reading named param at " + strconv.Itoa(i)) - return query, names, err - } - inName = true - name = []byte{} - } else if inName && i > 0 && b == '=' { - rebound = append(rebound, ':', '=') - inName = false - continue - // if we're in a name, and this is an allowed character, continue - } else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_' || b == '.') && i != last { - // append the byte to the name if we are in a name and not on the last byte - name = append(name, b) - // if we're in a name and it's not an allowed character, the name is done - } else if inName { - inName = false - // if this is the final byte of the string and it is part of the name, then - // make sure to add it to the name - if i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) { - name = append(name, b) - } - // add the string representation to the names list - names = append(names, string(name)) - // add a proper bindvar for the bindType - switch bindType { - // oracle only supports named type bind vars even for positional - case NAMED: - rebound = append(rebound, ':') - rebound = append(rebound, name...) - case QUESTION, UNKNOWN: - rebound = append(rebound, '?') - case DOLLAR: - rebound = append(rebound, '$') - for _, b := range strconv.Itoa(currentVar) { - rebound = append(rebound, byte(b)) - } - currentVar++ - case AT: - rebound = append(rebound, '@', 'p') - for _, b := range strconv.Itoa(currentVar) { - rebound = append(rebound, byte(b)) - } - currentVar++ - } - // add this byte to string unless it was not part of the name - if i != last { - rebound = append(rebound, b) - } else if !unicode.IsOneOf(allowedBindRunes, rune(b)) { - rebound = append(rebound, b) - } - } else { - // this is a normal byte and should just go onto the rebound query - rebound = append(rebound, b) - } - } - - return string(rebound), names, err -} - -// BindNamed binds a struct or a map to a query with named parameters. -// DEPRECATED: use sqlx.Named` instead of this, it may be removed in future. -func BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) { - return bindNamedMapper(bindType, query, arg, mapper()) -} - -// Named takes a query using named parameters and an argument and -// returns a new query with a list of args that can be executed by -// a database. The return value uses the `?` bindvar. -func Named(query string, arg interface{}) (string, []interface{}, error) { - return bindNamedMapper(QUESTION, query, arg, mapper()) -} - -func bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) { - if maparg, ok := arg.(map[string]interface{}); ok { - return bindMap(bindType, query, maparg) - } - return bindStruct(bindType, query, arg, m) -} - -// NamedQuery binds a named query and then runs Query on the result using the -// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with -// map[string]interface{} types. 
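// A hedged usage sketch (`db`, `Person`, and the query are placeholders);
// both argument styles rely on the binding rules documented above:
//
//	// struct: fields are matched via the `db` tag / NameMapper
//	rows, err := sqlx.NamedQuery(db,
//		"SELECT * FROM person WHERE first_name = :first_name", p)
//
//	// map: keys are matched verbatim
//	rows, err = sqlx.NamedQuery(db,
//		"SELECT * FROM person WHERE first_name = :first_name",
//		map[string]interface{}{"first_name": "Jane"})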
-func NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) { - q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e)) - if err != nil { - return nil, err - } - return e.Queryx(q, args...) -} - -// NamedExec uses BindStruct to get a query executable by the driver and -// then runs Exec on the result. Returns an error from the binding -// or the query execution itself. -func NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) { - q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e)) - if err != nil { - return nil, err - } - return e.Exec(q, args...) -} diff --git a/vendor/github.com/jmoiron/sqlx/named_context.go b/vendor/github.com/jmoiron/sqlx/named_context.go deleted file mode 100644 index 9405007..0000000 --- a/vendor/github.com/jmoiron/sqlx/named_context.go +++ /dev/null @@ -1,132 +0,0 @@ -// +build go1.8 - -package sqlx - -import ( - "context" - "database/sql" -) - -// A union interface of contextPreparer and binder, required to be able to -// prepare named statements with context (as the bindtype must be determined). -type namedPreparerContext interface { - PreparerContext - binder -} - -func prepareNamedContext(ctx context.Context, p namedPreparerContext, query string) (*NamedStmt, error) { - bindType := BindType(p.DriverName()) - q, args, err := compileNamedQuery([]byte(query), bindType) - if err != nil { - return nil, err - } - stmt, err := PreparexContext(ctx, p, q) - if err != nil { - return nil, err - } - return &NamedStmt{ - QueryString: q, - Params: args, - Stmt: stmt, - }, nil -} - -// ExecContext executes a named statement using the struct passed. -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) ExecContext(ctx context.Context, arg interface{}) (sql.Result, error) { - args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) - if err != nil { - return *new(sql.Result), err - } - return n.Stmt.ExecContext(ctx, args...) -} - -// QueryContext executes a named statement using the struct argument, returning rows. -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) QueryContext(ctx context.Context, arg interface{}) (*sql.Rows, error) { - args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) - if err != nil { - return nil, err - } - return n.Stmt.QueryContext(ctx, args...) -} - -// QueryRowContext executes a named statement against the database. Because sqlx cannot -// create a *sql.Row with an error condition pre-set for binding errors, sqlx -// returns a *sqlx.Row instead. -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) QueryRowContext(ctx context.Context, arg interface{}) *Row { - args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) - if err != nil { - return &Row{err: err} - } - return n.Stmt.QueryRowxContext(ctx, args...) -} - -// MustExecContext execs a NamedStmt, panicking on error. -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) MustExecContext(ctx context.Context, arg interface{}) sql.Result { - res, err := n.ExecContext(ctx, arg) - if err != nil { - panic(err) - } - return res -} - -// QueryxContext using this NamedStmt -// Any named placeholder parameters are replaced with fields from arg.
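// A hedged sketch of the context-aware variants in this file (the timeout is
// arbitrary; `db` is an assumed *DB):
//
//	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
//	defer cancel()
//	stmt, err := db.PrepareNamed("SELECT * FROM person WHERE first_name = :fn")
//	if err != nil {
//		log.Fatalln(err)
//	}
//	defer stmt.Close()
//	rows, err := stmt.QueryxContext(ctx, map[string]interface{}{"fn": "Jane"})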
-func (n *NamedStmt) QueryxContext(ctx context.Context, arg interface{}) (*Rows, error) { - r, err := n.QueryContext(ctx, arg) - if err != nil { - return nil, err - } - return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err -} - -// QueryRowxContext this NamedStmt. Because of limitations with QueryRow, this is -// an alias for QueryRow. -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) QueryRowxContext(ctx context.Context, arg interface{}) *Row { - return n.QueryRowContext(ctx, arg) -} - -// SelectContext using this NamedStmt -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) SelectContext(ctx context.Context, dest interface{}, arg interface{}) error { - rows, err := n.QueryxContext(ctx, arg) - if err != nil { - return err - } - // if something happens here, we want to make sure the rows are Closed - defer rows.Close() - return scanAll(rows, dest, false) -} - -// GetContext using this NamedStmt -// Any named placeholder parameters are replaced with fields from arg. -func (n *NamedStmt) GetContext(ctx context.Context, dest interface{}, arg interface{}) error { - r := n.QueryRowxContext(ctx, arg) - return r.scanAny(dest, false) -} - -// NamedQueryContext binds a named query and then runs Query on the result using the -// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with -// map[string]interface{} types. -func NamedQueryContext(ctx context.Context, e ExtContext, query string, arg interface{}) (*Rows, error) { - q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e)) - if err != nil { - return nil, err - } - return e.QueryxContext(ctx, q, args...) -} - -// NamedExecContext uses BindStruct to get a query executable by the driver and -// then runs Exec on the result. Returns an error from the binding -// or the query execution itself. -func NamedExecContext(ctx context.Context, e ExtContext, query string, arg interface{}) (sql.Result, error) { - q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e)) - if err != nil { - return nil, err - } - return e.ExecContext(ctx, q, args...) -} diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/README.md b/vendor/github.com/jmoiron/sqlx/reflectx/README.md deleted file mode 100644 index f01d3d1..0000000 --- a/vendor/github.com/jmoiron/sqlx/reflectx/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# reflectx - -The sqlx package has special reflect needs. In particular, it needs to: - -* be able to map a name to a field -* understand embedded structs -* understand mapping names to fields by a particular tag -* user-specified name -> field mapping functions - -These behaviors mimic the behaviors of the standard library marshallers and also the -behavior of standard Go accessors. - -The first two are amply taken care of by `reflect.Value.FieldByName`, and the third is -addressed by `reflect.Value.FieldByNameFunc`, but these don't quite understand struct -tags in the ways that are vital to most marshallers, and they are slow. - -This reflectx package extends reflect to achieve these goals. diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go deleted file mode 100644 index 73c21eb..0000000 --- a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go +++ /dev/null @@ -1,441 +0,0 @@ -// Package reflectx implements extensions to the standard reflect lib suitable -// for implementing marshalling and unmarshalling packages.
The main Mapper type -// allows for Go-compatible named attribute access, including accessing embedded -// struct attributes and the ability to use functions and struct tags to -// customize field names. -// -package reflectx - -import ( - "reflect" - "runtime" - "strings" - "sync" -) - -// A FieldInfo is metadata for a struct field. -type FieldInfo struct { - Index []int - Path string - Field reflect.StructField - Zero reflect.Value - Name string - Options map[string]string - Embedded bool - Children []*FieldInfo - Parent *FieldInfo -} - -// A StructMap is an index of field metadata for a struct. -type StructMap struct { - Tree *FieldInfo - Index []*FieldInfo - Paths map[string]*FieldInfo - Names map[string]*FieldInfo -} - -// GetByPath returns a *FieldInfo for a given string path. -func (f StructMap) GetByPath(path string) *FieldInfo { - return f.Paths[path] -} - -// GetByTraversal returns a *FieldInfo for a given integer path. It is -// analogous to reflect.FieldByIndex, but using the cached traversal -// rather than re-executing the reflect machinery each time. -func (f StructMap) GetByTraversal(index []int) *FieldInfo { - if len(index) == 0 { - return nil - } - - tree := f.Tree - for _, i := range index { - if i >= len(tree.Children) || tree.Children[i] == nil { - return nil - } - tree = tree.Children[i] - } - return tree -} - -// Mapper is a general purpose mapper of names to struct fields. A Mapper -// behaves like most marshallers in the standard library, obeying a field tag -// for name mapping but also providing a basic transform function. -type Mapper struct { - cache map[reflect.Type]*StructMap - tagName string - tagMapFunc func(string) string - mapFunc func(string) string - mutex sync.Mutex -} - -// NewMapper returns a new mapper using the tagName as its struct field tag. -// If tagName is the empty string, it is ignored. -func NewMapper(tagName string) *Mapper { - return &Mapper{ - cache: make(map[reflect.Type]*StructMap), - tagName: tagName, - } -} - -// NewMapperTagFunc returns a new mapper which contains a mapper for field names -// AND a mapper for tag values. This is useful for tags like json which can -// have values like "name,omitempty". -func NewMapperTagFunc(tagName string, mapFunc, tagMapFunc func(string) string) *Mapper { - return &Mapper{ - cache: make(map[reflect.Type]*StructMap), - tagName: tagName, - mapFunc: mapFunc, - tagMapFunc: tagMapFunc, - } -} - -// NewMapperFunc returns a new mapper which optionally obeys a field tag and -// a struct field name mapper func given by f. Tags will take precedence, but -// for any other field, the mapped name will be f(field.Name) -func NewMapperFunc(tagName string, f func(string) string) *Mapper { - return &Mapper{ - cache: make(map[reflect.Type]*StructMap), - tagName: tagName, - mapFunc: f, - } -} - -// TypeMap returns a mapping of field strings to int slices representing -// the traversal down the struct to reach the field. -func (m *Mapper) TypeMap(t reflect.Type) *StructMap { - m.mutex.Lock() - mapping, ok := m.cache[t] - if !ok { - mapping = getMapping(t, m.tagName, m.mapFunc, m.tagMapFunc) - m.cache[t] = mapping - } - m.mutex.Unlock() - return mapping -} - -// FieldMap returns the mapper's mapping of field names to reflect values. Panics -// if v's Kind is not Struct, or v is not Indirectable to a struct kind. 
-func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value { - v = reflect.Indirect(v) - mustBe(v, reflect.Struct) - - r := map[string]reflect.Value{} - tm := m.TypeMap(v.Type()) - for tagName, fi := range tm.Names { - r[tagName] = FieldByIndexes(v, fi.Index) - } - return r -} - -// FieldByName returns a field by its mapped name as a reflect.Value. -// Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind. -// Returns zero Value if the name is not found. -func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value { - v = reflect.Indirect(v) - mustBe(v, reflect.Struct) - - tm := m.TypeMap(v.Type()) - fi, ok := tm.Names[name] - if !ok { - return v - } - return FieldByIndexes(v, fi.Index) -} - -// FieldsByName returns a slice of values corresponding to the slice of names -// for the value. Panics if v's Kind is not Struct or v is not Indirectable -// to a struct Kind. Returns zero Value for each name not found. -func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value { - v = reflect.Indirect(v) - mustBe(v, reflect.Struct) - - tm := m.TypeMap(v.Type()) - vals := make([]reflect.Value, 0, len(names)) - for _, name := range names { - fi, ok := tm.Names[name] - if !ok { - vals = append(vals, *new(reflect.Value)) - } else { - vals = append(vals, FieldByIndexes(v, fi.Index)) - } - } - return vals -} - -// TraversalsByName returns a slice of int slices which represent the struct -// traversals for each mapped name. Panics if t is not a struct or Indirectable -// to a struct. Returns empty int slice for each name not found. -func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int { - r := make([][]int, 0, len(names)) - m.TraversalsByNameFunc(t, names, func(_ int, i []int) error { - if i == nil { - r = append(r, []int{}) - } else { - r = append(r, i) - } - - return nil - }) - return r -} - -// TraversalsByNameFunc traverses the mapped names and calls fn with the index of -// each name and the struct traversal represented by that name. Panics if t is not -// a struct or Indirectable to a struct. Returns the first error returned by fn or nil. -func (m *Mapper) TraversalsByNameFunc(t reflect.Type, names []string, fn func(int, []int) error) error { - t = Deref(t) - mustBe(t, reflect.Struct) - tm := m.TypeMap(t) - for i, name := range names { - fi, ok := tm.Names[name] - if !ok { - if err := fn(i, nil); err != nil { - return err - } - } else { - if err := fn(i, fi.Index); err != nil { - return err - } - } - } - return nil -} - -// FieldByIndexes returns a value for the field given by the struct traversal -// for the given value. -func FieldByIndexes(v reflect.Value, indexes []int) reflect.Value { - for _, i := range indexes { - v = reflect.Indirect(v).Field(i) - // if this is a pointer and it's nil, allocate a new value and set it - if v.Kind() == reflect.Ptr && v.IsNil() { - alloc := reflect.New(Deref(v.Type())) - v.Set(alloc) - } - if v.Kind() == reflect.Map && v.IsNil() { - v.Set(reflect.MakeMap(v.Type())) - } - } - return v -} - -// FieldByIndexesReadOnly returns a value for a particular struct traversal, -// but is not concerned with allocating nil pointers because the value is -// going to be used for reading and not setting. 
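// A worked sketch of a traversal (types are illustrative). For
//
//	type Inner struct{ N int }
//	type Outer struct {
//		S  string
//		In Inner
//	}
//
// the traversal for the mapped name "in.n" is []int{1, 0}: field 1 of Outer,
// then field 0 of Inner. FieldByIndexes above follows such a path, allocating
// nil pointers and maps on the way; FieldByIndexesReadOnly below skips the
// allocation because the value is only read.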
-func FieldByIndexesReadOnly(v reflect.Value, indexes []int) reflect.Value { - for _, i := range indexes { - v = reflect.Indirect(v).Field(i) - } - return v -} - -// Deref is Indirect for reflect.Types -func Deref(t reflect.Type) reflect.Type { - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - return t -} - -// -- helpers & utilities -- - -type kinder interface { - Kind() reflect.Kind -} - -// mustBe checks a value against a kind, panicking with a reflect.ValueError -// if the kind isn't that which is required. -func mustBe(v kinder, expected reflect.Kind) { - if k := v.Kind(); k != expected { - panic(&reflect.ValueError{Method: methodName(), Kind: k}) - } -} - -// methodName returns the caller of the function calling methodName -func methodName() string { - pc, _, _, _ := runtime.Caller(2) - f := runtime.FuncForPC(pc) - if f == nil { - return "unknown method" - } - return f.Name() -} - -type typeQueue struct { - t reflect.Type - fi *FieldInfo - pp string // Parent path -} - -// A copying append that creates a new slice each time. -func apnd(is []int, i int) []int { - x := make([]int, len(is)+1) - for p, n := range is { - x[p] = n - } - x[len(x)-1] = i - return x -} - -type mapf func(string) string - -// parseName parses the tag and the target name for the given field using -// the tagName (eg 'json' for `json:"foo"` tags), mapFunc for mapping the -// field's name to a target name, and tagMapFunc for mapping the tag to -// a target name. -func parseName(field reflect.StructField, tagName string, mapFunc, tagMapFunc mapf) (tag, fieldName string) { - // first, set the fieldName to the field's name - fieldName = field.Name - // if a mapFunc is set, use that to override the fieldName - if mapFunc != nil { - fieldName = mapFunc(fieldName) - } - - // if there's no tag to look for, return the field name - if tagName == "" { - return "", fieldName - } - - // if this tag is not set using the normal convention in the tag, - // then return the fieldname.. this check is done because according - // to the reflect documentation: - //    If the tag does not have the conventional format, - //    the value returned by Get is unspecified. - // which doesn't sound great. - if !strings.Contains(string(field.Tag), tagName+":") { - return "", fieldName - } - - // at this point we're fairly sure that we have a tag, so let's pull it out - tag = field.Tag.Get(tagName) - - // if we have a mapper function, call it on the whole tag - // XXX: this is a change from the old version, which pulled out the name - // before the tagMapFunc could be run, but I think this is the right way - if tagMapFunc != nil { - tag = tagMapFunc(tag) - } - - // finally, split the options from the name - parts := strings.Split(tag, ",") - fieldName = parts[0] - - return tag, fieldName -} - -// parseOptions parses options out of a tag string, skipping the name -func parseOptions(tag string) map[string]string { - parts := strings.Split(tag, ",") - options := make(map[string]string, len(parts)) - if len(parts) > 1 { - for _, opt := range parts[1:] { - // short circuit potentially expensive split op - if strings.Contains(opt, "=") { - kv := strings.Split(opt, "=") - options[kv[0]] = kv[1] - continue - } - options[opt] = "" - } - } - return options -} - -// getMapping returns a mapping for the t type, using the tagName, mapFunc and -// tagMapFunc to determine the canonical names of fields.
-func getMapping(t reflect.Type, tagName string, mapFunc, tagMapFunc mapf) *StructMap { - m := []*FieldInfo{} - - root := &FieldInfo{} - queue := []typeQueue{} - queue = append(queue, typeQueue{Deref(t), root, ""}) - -QueueLoop: - for len(queue) != 0 { - // pop the first item off of the queue - tq := queue[0] - queue = queue[1:] - - // ignore recursive field - for p := tq.fi.Parent; p != nil; p = p.Parent { - if tq.fi.Field.Type == p.Field.Type { - continue QueueLoop - } - } - - nChildren := 0 - if tq.t.Kind() == reflect.Struct { - nChildren = tq.t.NumField() - } - tq.fi.Children = make([]*FieldInfo, nChildren) - - // iterate through all of its fields - for fieldPos := 0; fieldPos < nChildren; fieldPos++ { - - f := tq.t.Field(fieldPos) - - // parse the tag and the target name using the mapping options for this field - tag, name := parseName(f, tagName, mapFunc, tagMapFunc) - - // if the name is "-", disabled via a tag, skip it - if name == "-" { - continue - } - - fi := FieldInfo{ - Field: f, - Name: name, - Zero: reflect.New(f.Type).Elem(), - Options: parseOptions(tag), - } - - // if the path is empty this path is just the name - if tq.pp == "" { - fi.Path = fi.Name - } else { - fi.Path = tq.pp + "." + fi.Name - } - - // skip unexported fields - if len(f.PkgPath) != 0 && !f.Anonymous { - continue - } - - // bfs search of anonymous embedded structs - if f.Anonymous { - pp := tq.pp - if tag != "" { - pp = fi.Path - } - - fi.Embedded = true - fi.Index = apnd(tq.fi.Index, fieldPos) - nChildren := 0 - ft := Deref(f.Type) - if ft.Kind() == reflect.Struct { - nChildren = ft.NumField() - } - fi.Children = make([]*FieldInfo, nChildren) - queue = append(queue, typeQueue{Deref(f.Type), &fi, pp}) - } else if fi.Zero.Kind() == reflect.Struct || (fi.Zero.Kind() == reflect.Ptr && fi.Zero.Type().Elem().Kind() == reflect.Struct) { - fi.Index = apnd(tq.fi.Index, fieldPos) - fi.Children = make([]*FieldInfo, Deref(f.Type).NumField()) - queue = append(queue, typeQueue{Deref(f.Type), &fi, fi.Path}) - } - - fi.Index = apnd(tq.fi.Index, fieldPos) - fi.Parent = tq.fi - tq.fi.Children[fieldPos] = &fi - m = append(m, &fi) - } - } - - flds := &StructMap{Index: m, Tree: root, Paths: map[string]*FieldInfo{}, Names: map[string]*FieldInfo{}} - for _, fi := range flds.Index { - flds.Paths[fi.Path] = fi - if fi.Name != "" && !fi.Embedded { - flds.Names[fi.Path] = fi - } - } - - return flds -} diff --git a/vendor/github.com/jmoiron/sqlx/sqlx.go b/vendor/github.com/jmoiron/sqlx/sqlx.go deleted file mode 100644 index 3f000f4..0000000 --- a/vendor/github.com/jmoiron/sqlx/sqlx.go +++ /dev/null @@ -1,1045 +0,0 @@ -package sqlx - -import ( - "database/sql" - "database/sql/driver" - "errors" - "fmt" - - "io/ioutil" - "path/filepath" - "reflect" - "strings" - "sync" - - "github.com/jmoiron/sqlx/reflectx" -) - -// Although the NameMapper is convenient, in practice it should not -// be relied on except for application code. If you are writing a library -// that uses sqlx, you should be aware that the name mappings you expect -// can be overridden by your user's application. - -// NameMapper is used to map column names to struct field names. By default, -// it uses strings.ToLower to lowercase struct field names. It can be set -// to whatever you want, but it is encouraged to be set before sqlx is used -// as name-to-field mappings are cached after first use on a type. 
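// A hedged sketch of customizing NameMapper (toSnakeCase is a hypothetical
// helper supplied by the application, not part of sqlx):
//
//	func init() {
//		sqlx.NameMapper = toSnakeCase // e.g. "FirstName" -> "first_name"
//	}
//
// Because name-to-field mappings are cached per type, the override must be in
// place before sqlx first touches the affected structs.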
-var NameMapper = strings.ToLower -var origMapper = reflect.ValueOf(NameMapper) - -// Rather than creating on init, this is created when necessary so that -// importers have time to customize the NameMapper. -var mpr *reflectx.Mapper - -// mprMu protects mpr. -var mprMu sync.Mutex - -// mapper returns a valid mapper using the configured NameMapper func. -func mapper() *reflectx.Mapper { - mprMu.Lock() - defer mprMu.Unlock() - - if mpr == nil { - mpr = reflectx.NewMapperFunc("db", NameMapper) - } else if origMapper != reflect.ValueOf(NameMapper) { - // if NameMapper has changed, create a new mapper - mpr = reflectx.NewMapperFunc("db", NameMapper) - origMapper = reflect.ValueOf(NameMapper) - } - return mpr -} - -// isScannable takes the reflect.Type and the actual dest value and returns -// whether or not it's Scannable. Something is scannable if: -// * it is not a struct -// * it implements sql.Scanner -// * it has no exported fields -func isScannable(t reflect.Type) bool { - if reflect.PtrTo(t).Implements(_scannerInterface) { - return true - } - if t.Kind() != reflect.Struct { - return true - } - - // it's not important that we use the right mapper for this particular object, - // we're only concerned with how many exported fields this struct has - m := mapper() - if len(m.TypeMap(t).Index) == 0 { - return true - } - return false -} - -// ColScanner is an interface used by MapScan and SliceScan -type ColScanner interface { - Columns() ([]string, error) - Scan(dest ...interface{}) error - Err() error -} - -// Queryer is an interface used by Get and Select -type Queryer interface { - Query(query string, args ...interface{}) (*sql.Rows, error) - Queryx(query string, args ...interface{}) (*Rows, error) - QueryRowx(query string, args ...interface{}) *Row -} - -// Execer is an interface used by MustExec and LoadFile -type Execer interface { - Exec(query string, args ...interface{}) (sql.Result, error) -} - -// Binder is an interface for something which can bind queries (Tx, DB) -type binder interface { - DriverName() string - Rebind(string) string - BindNamed(string, interface{}) (string, []interface{}, error) -} - -// Ext is a union interface which can bind, query, and exec, used by -// NamedQuery and NamedExec. -type Ext interface { - binder - Queryer - Execer -} - -// Preparer is an interface used by Preparex.
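// A hedged sketch of coding against the union interface (query and type are
// placeholders); because both *DB and *Tx satisfy Ext, the same helper works
// inside and outside a transaction:
//
//	func insertPerson(e sqlx.Ext, p Person) error {
//		_, err := sqlx.NamedExec(e,
//			"INSERT INTO person (first_name) VALUES (:first_name)", p)
//		return err
//	}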
-type Preparer interface { - Prepare(query string) (*sql.Stmt, error) -} - -// determine if any of our extensions are unsafe -func isUnsafe(i interface{}) bool { - switch v := i.(type) { - case Row: - return v.unsafe - case *Row: - return v.unsafe - case Rows: - return v.unsafe - case *Rows: - return v.unsafe - case NamedStmt: - return v.Stmt.unsafe - case *NamedStmt: - return v.Stmt.unsafe - case Stmt: - return v.unsafe - case *Stmt: - return v.unsafe - case qStmt: - return v.unsafe - case *qStmt: - return v.unsafe - case DB: - return v.unsafe - case *DB: - return v.unsafe - case Tx: - return v.unsafe - case *Tx: - return v.unsafe - case sql.Rows, *sql.Rows: - return false - default: - return false - } -} - -func mapperFor(i interface{}) *reflectx.Mapper { - switch i.(type) { - case DB: - return i.(DB).Mapper - case *DB: - return i.(*DB).Mapper - case Tx: - return i.(Tx).Mapper - case *Tx: - return i.(*Tx).Mapper - default: - return mapper() - } -} - -var _scannerInterface = reflect.TypeOf((*sql.Scanner)(nil)).Elem() -var _valuerInterface = reflect.TypeOf((*driver.Valuer)(nil)).Elem() - -// Row is a reimplementation of sql.Row in order to gain access to the underlying -// sql.Rows.Columns() data, necessary for StructScan. -type Row struct { - err error - unsafe bool - rows *sql.Rows - Mapper *reflectx.Mapper -} - -// Scan is a fixed implementation of sql.Row.Scan, which does not discard the -// underlying error from the internal rows object if it exists. -func (r *Row) Scan(dest ...interface{}) error { - if r.err != nil { - return r.err - } - - // TODO(bradfitz): for now we need to defensively clone all - // []byte that the driver returned (not permitting - // *RawBytes in Rows.Scan), since we're about to close - // the Rows in our defer, when we return from this function. - // the contract with the driver.Next(...) interface is that it - // can return slices into read-only temporary memory that's - // only valid until the next Scan/Close. But the TODO is that - // for a lot of drivers, this copy will be unnecessary. We - // should provide an optional interface for drivers to - // implement to say, "don't worry, the []bytes that I return - // from Next will not be modified again." (for instance, if - // they were obtained from the network anyway) But for now we - // don't care. - defer r.rows.Close() - for _, dp := range dest { - if _, ok := dp.(*sql.RawBytes); ok { - return errors.New("sql: RawBytes isn't allowed on Row.Scan") - } - } - - if !r.rows.Next() { - if err := r.rows.Err(); err != nil { - return err - } - return sql.ErrNoRows - } - err := r.rows.Scan(dest...) - if err != nil { - return err - } - // Make sure the query can be processed to completion with no errors. - if err := r.rows.Close(); err != nil { - return err - } - return nil -} - -// Columns returns the underlying sql.Rows.Columns(), or the deferred error usually -// returned by Row.Scan() -func (r *Row) Columns() ([]string, error) { - if r.err != nil { - return []string{}, r.err - } - return r.rows.Columns() -} - -// ColumnTypes returns the underlying sql.Rows.ColumnTypes(), or the deferred error -func (r *Row) ColumnTypes() ([]*sql.ColumnType, error) { - if r.err != nil { - return []*sql.ColumnType{}, r.err - } - return r.rows.ColumnTypes() -} - -// Err returns the error encountered while scanning. -func (r *Row) Err() error { - return r.err -} - -// DB is a wrapper around sql.DB which keeps track of the driverName upon Open, -// used mostly to automatically bind named queries using the right bindvars. 
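// A hedged sketch of why the driver name is recorded (DSN is a placeholder):
//
//	db, err := sqlx.Open("postgres", "user=foo dbname=bar sslmode=disable")
//	if err != nil {
//		log.Fatalln(err)
//	}
//	q := db.Rebind("SELECT * FROM person WHERE id = ?")
//	// q == "SELECT * FROM person WHERE id = $1", since BindType("postgres")
//	// is DOLLAR; with driver "mysql" the `?` would be left untouched.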
-type DB struct { - *sql.DB - driverName string - unsafe bool - Mapper *reflectx.Mapper -} - -// NewDb returns a new sqlx DB wrapper for a pre-existing *sql.DB. The -// driverName of the original database is required for named query support. -func NewDb(db *sql.DB, driverName string) *DB { - return &DB{DB: db, driverName: driverName, Mapper: mapper()} -} - -// DriverName returns the driverName passed to the Open function for this DB. -func (db *DB) DriverName() string { - return db.driverName -} - -// Open is the same as sql.Open, but returns an *sqlx.DB instead. -func Open(driverName, dataSourceName string) (*DB, error) { - db, err := sql.Open(driverName, dataSourceName) - if err != nil { - return nil, err - } - return &DB{DB: db, driverName: driverName, Mapper: mapper()}, err -} - -// MustOpen is the same as sql.Open, but returns an *sqlx.DB instead and panics on error. -func MustOpen(driverName, dataSourceName string) *DB { - db, err := Open(driverName, dataSourceName) - if err != nil { - panic(err) - } - return db -} - -// MapperFunc sets a new mapper for this db using the default sqlx struct tag -// and the provided mapper function. -func (db *DB) MapperFunc(mf func(string) string) { - db.Mapper = reflectx.NewMapperFunc("db", mf) -} - -// Rebind transforms a query from QUESTION to the DB driver's bindvar type. -func (db *DB) Rebind(query string) string { - return Rebind(BindType(db.driverName), query) -} - -// Unsafe returns a version of DB which will silently succeed to scan when -// columns in the SQL result have no fields in the destination struct. -// sqlx.Stmt and sqlx.Tx which are created from this DB will inherit its -// safety behavior. -func (db *DB) Unsafe() *DB { - return &DB{DB: db.DB, driverName: db.driverName, unsafe: true, Mapper: db.Mapper} -} - -// BindNamed binds a query using the DB driver's bindvar type. -func (db *DB) BindNamed(query string, arg interface{}) (string, []interface{}, error) { - return bindNamedMapper(BindType(db.driverName), query, arg, db.Mapper) -} - -// NamedQuery using this DB. -// Any named placeholder parameters are replaced with fields from arg. -func (db *DB) NamedQuery(query string, arg interface{}) (*Rows, error) { - return NamedQuery(db, query, arg) -} - -// NamedExec using this DB. -// Any named placeholder parameters are replaced with fields from arg. -func (db *DB) NamedExec(query string, arg interface{}) (sql.Result, error) { - return NamedExec(db, query, arg) -} - -// Select using this DB. -// Any placeholder parameters are replaced with supplied args. -func (db *DB) Select(dest interface{}, query string, args ...interface{}) error { - return Select(db, dest, query, args...) -} - -// Get using this DB. -// Any placeholder parameters are replaced with supplied args. -// An error is returned if the result set is empty. -func (db *DB) Get(dest interface{}, query string, args ...interface{}) error { - return Get(db, dest, query, args...) -} - -// MustBegin starts a transaction, and panics on error. Returns an *sqlx.Tx instead -// of an *sql.Tx. -func (db *DB) MustBegin() *Tx { - tx, err := db.Beginx() - if err != nil { - panic(err) - } - return tx -} - -// Beginx begins a transaction and returns an *sqlx.Tx instead of an *sql.Tx. -func (db *DB) Beginx() (*Tx, error) { - tx, err := db.DB.Begin() - if err != nil { - return nil, err - } - return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err -} - -// Queryx queries the database and returns an *sqlx.Rows. 
-// Any placeholder parameters are replaced with supplied args. -func (db *DB) Queryx(query string, args ...interface{}) (*Rows, error) { - r, err := db.DB.Query(query, args...) - if err != nil { - return nil, err - } - return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err -} - -// QueryRowx queries the database and returns an *sqlx.Row. -// Any placeholder parameters are replaced with supplied args. -func (db *DB) QueryRowx(query string, args ...interface{}) *Row { - rows, err := db.DB.Query(query, args...) - return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper} -} - -// MustExec (panic) runs MustExec using this database. -// Any placeholder parameters are replaced with supplied args. -func (db *DB) MustExec(query string, args ...interface{}) sql.Result { - return MustExec(db, query, args...) -} - -// Preparex returns an sqlx.Stmt instead of a sql.Stmt -func (db *DB) Preparex(query string) (*Stmt, error) { - return Preparex(db, query) -} - -// PrepareNamed returns an sqlx.NamedStmt -func (db *DB) PrepareNamed(query string) (*NamedStmt, error) { - return prepareNamed(db, query) -} - -// Tx is an sqlx wrapper around sql.Tx with extra functionality -type Tx struct { - *sql.Tx - driverName string - unsafe bool - Mapper *reflectx.Mapper -} - -// DriverName returns the driverName used by the DB which began this transaction. -func (tx *Tx) DriverName() string { - return tx.driverName -} - -// Rebind a query within a transaction's bindvar type. -func (tx *Tx) Rebind(query string) string { - return Rebind(BindType(tx.driverName), query) -} - -// Unsafe returns a version of Tx which will silently succeed to scan when -// columns in the SQL result have no fields in the destination struct. -func (tx *Tx) Unsafe() *Tx { - return &Tx{Tx: tx.Tx, driverName: tx.driverName, unsafe: true, Mapper: tx.Mapper} -} - -// BindNamed binds a query within a transaction's bindvar type. -func (tx *Tx) BindNamed(query string, arg interface{}) (string, []interface{}, error) { - return bindNamedMapper(BindType(tx.driverName), query, arg, tx.Mapper) -} - -// NamedQuery within a transaction. -// Any named placeholder parameters are replaced with fields from arg. -func (tx *Tx) NamedQuery(query string, arg interface{}) (*Rows, error) { - return NamedQuery(tx, query, arg) -} - -// NamedExec a named query within a transaction. -// Any named placeholder parameters are replaced with fields from arg. -func (tx *Tx) NamedExec(query string, arg interface{}) (sql.Result, error) { - return NamedExec(tx, query, arg) -} - -// Select within a transaction. -// Any placeholder parameters are replaced with supplied args. -func (tx *Tx) Select(dest interface{}, query string, args ...interface{}) error { - return Select(tx, dest, query, args...) -} - -// Queryx within a transaction. -// Any placeholder parameters are replaced with supplied args. -func (tx *Tx) Queryx(query string, args ...interface{}) (*Rows, error) { - r, err := tx.Tx.Query(query, args...) - if err != nil { - return nil, err - } - return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err -} - -// QueryRowx within a transaction. -// Any placeholder parameters are replaced with supplied args. -func (tx *Tx) QueryRowx(query string, args ...interface{}) *Row { - rows, err := tx.Tx.Query(query, args...) - return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper} -} - -// Get within a transaction. -// Any placeholder parameters are replaced with supplied args. -// An error is returned if the result set is empty. 
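// A hedged sketch of the transactional helpers (query is a placeholder; real
// code should also check the Commit error):
//
//	tx, err := db.Beginx()
//	if err != nil {
//		return err
//	}
//	var p Person
//	if err := tx.Get(&p, "SELECT * FROM person WHERE id = $1", 1); err != nil {
//		tx.Rollback()
//		return err
//	}
//	return tx.Commit()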
-func (tx *Tx) Get(dest interface{}, query string, args ...interface{}) error { - return Get(tx, dest, query, args...) -} - -// MustExec runs MustExec within a transaction. -// Any placeholder parameters are replaced with supplied args. -func (tx *Tx) MustExec(query string, args ...interface{}) sql.Result { - return MustExec(tx, query, args...) -} - -// Preparex a statement within a transaction. -func (tx *Tx) Preparex(query string) (*Stmt, error) { - return Preparex(tx, query) -} - -// Stmtx returns a version of the prepared statement which runs within a transaction. Provided -// stmt can be either *sql.Stmt or *sqlx.Stmt. -func (tx *Tx) Stmtx(stmt interface{}) *Stmt { - var s *sql.Stmt - switch v := stmt.(type) { - case Stmt: - s = v.Stmt - case *Stmt: - s = v.Stmt - case *sql.Stmt: - s = v - default: - panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type())) - } - return &Stmt{Stmt: tx.Stmt(s), Mapper: tx.Mapper} -} - -// NamedStmt returns a version of the prepared statement which runs within a transaction. -func (tx *Tx) NamedStmt(stmt *NamedStmt) *NamedStmt { - return &NamedStmt{ - QueryString: stmt.QueryString, - Params: stmt.Params, - Stmt: tx.Stmtx(stmt.Stmt), - } -} - -// PrepareNamed returns an sqlx.NamedStmt -func (tx *Tx) PrepareNamed(query string) (*NamedStmt, error) { - return prepareNamed(tx, query) -} - -// Stmt is an sqlx wrapper around sql.Stmt with extra functionality -type Stmt struct { - *sql.Stmt - unsafe bool - Mapper *reflectx.Mapper -} - -// Unsafe returns a version of Stmt which will silently succeed to scan when -// columns in the SQL result have no fields in the destination struct. -func (s *Stmt) Unsafe() *Stmt { - return &Stmt{Stmt: s.Stmt, unsafe: true, Mapper: s.Mapper} -} - -// Select using the prepared statement. -// Any placeholder parameters are replaced with supplied args. -func (s *Stmt) Select(dest interface{}, args ...interface{}) error { - return Select(&qStmt{s}, dest, "", args...) -} - -// Get using the prepared statement. -// Any placeholder parameters are replaced with supplied args. -// An error is returned if the result set is empty. -func (s *Stmt) Get(dest interface{}, args ...interface{}) error { - return Get(&qStmt{s}, dest, "", args...) -} - -// MustExec (panic) using this statement. Note that the query portion of the error -// output will be blank, as Stmt does not expose its query. -// Any placeholder parameters are replaced with supplied args. -func (s *Stmt) MustExec(args ...interface{}) sql.Result { - return MustExec(&qStmt{s}, "", args...) -} - -// QueryRowx using this statement. -// Any placeholder parameters are replaced with supplied args. -func (s *Stmt) QueryRowx(args ...interface{}) *Row { - qs := &qStmt{s} - return qs.QueryRowx("", args...) -} - -// Queryx using this statement. -// Any placeholder parameters are replaced with supplied args. -func (s *Stmt) Queryx(args ...interface{}) (*Rows, error) { - qs := &qStmt{s} - return qs.Queryx("", args...) -} - -// qStmt is an unexposed wrapper which lets you use a Stmt as a Queryer & Execer by -// implementing those interfaces and ignoring the `query` argument. -type qStmt struct{ *Stmt } - -func (q *qStmt) Query(query string, args ...interface{}) (*sql.Rows, error) { - return q.Stmt.Query(args...) -} - -func (q *qStmt) Queryx(query string, args ...interface{}) (*Rows, error) { - r, err := q.Stmt.Query(args...) 
- if err != nil { - return nil, err - } - return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err -} - -func (q *qStmt) QueryRowx(query string, args ...interface{}) *Row { - rows, err := q.Stmt.Query(args...) - return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper} -} - -func (q *qStmt) Exec(query string, args ...interface{}) (sql.Result, error) { - return q.Stmt.Exec(args...) -} - -// Rows is a wrapper around sql.Rows which caches costly reflect operations -// during a looped StructScan -type Rows struct { - *sql.Rows - unsafe bool - Mapper *reflectx.Mapper - // these fields cache memory use for a rows during iteration w/ structScan - started bool - fields [][]int - values []interface{} -} - -// SliceScan using this Rows. -func (r *Rows) SliceScan() ([]interface{}, error) { - return SliceScan(r) -} - -// MapScan using this Rows. -func (r *Rows) MapScan(dest map[string]interface{}) error { - return MapScan(r, dest) -} - -// StructScan is like sql.Rows.Scan, but scans a single Row into a single Struct. -// Use this and iterate over Rows manually when the memory load of Select() might be -// prohibitive. *Rows.StructScan caches the reflect work of matching up column -// positions to fields to avoid that overhead per scan, which means it is not safe -// to run StructScan on the same Rows instance with different struct types. -func (r *Rows) StructScan(dest interface{}) error { - v := reflect.ValueOf(dest) - - if v.Kind() != reflect.Ptr { - return errors.New("must pass a pointer, not a value, to StructScan destination") - } - - v = v.Elem() - - if !r.started { - columns, err := r.Columns() - if err != nil { - return err - } - m := r.Mapper - - r.fields = m.TraversalsByName(v.Type(), columns) - // if we are not unsafe and are missing fields, return an error - if f, err := missingFields(r.fields); err != nil && !r.unsafe { - return fmt.Errorf("missing destination name %s in %T", columns[f], dest) - } - r.values = make([]interface{}, len(columns)) - r.started = true - } - - err := fieldsByTraversal(v, r.fields, r.values, true) - if err != nil { - return err - } - // scan into the struct field pointers and append to our results - err = r.Scan(r.values...) - if err != nil { - return err - } - return r.Err() -} - -// Connect to a database and verify with a ping. -func Connect(driverName, dataSourceName string) (*DB, error) { - db, err := Open(driverName, dataSourceName) - if err != nil { - return nil, err - } - err = db.Ping() - if err != nil { - db.Close() - return nil, err - } - return db, nil -} - -// MustConnect connects to a database and panics on error. -func MustConnect(driverName, dataSourceName string) *DB { - db, err := Connect(driverName, dataSourceName) - if err != nil { - panic(err) - } - return db -} - -// Preparex prepares a statement. -func Preparex(p Preparer, query string) (*Stmt, error) { - s, err := p.Prepare(query) - if err != nil { - return nil, err - } - return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err -} - -// Select executes a query using the provided Queryer, and StructScans each row -// into dest, which must be a slice. If the slice elements are scannable, then -// the result set must have only one column. Otherwise, StructScan is used. -// The *sql.Rows are closed automatically. -// Any placeholder parameters are replaced with supplied args. -func Select(q Queryer, dest interface{}, query string, args ...interface{}) error { - rows, err := q.Queryx(query, args...) 
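// Rows.StructScan above is the streaming counterpart to Select: only one
// struct is live per iteration, driven by the caller. A sketch of that
// pattern, assuming the Person type and imports from the sketch further up:
func streamPeople(db *sqlx.DB) error {
	rows, err := db.Queryx(`SELECT id, name FROM person`) // placeholder query
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var p Person
		// The column-to-field traversal is computed once and cached, so the
		// same Rows must not be scanned into a different struct type.
		if err := rows.StructScan(&p); err != nil {
			return err
		}
		_ = p // process p here
	}
	return rows.Err()
}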
- if err != nil { - return err - } - // if something happens here, we want to make sure the rows are Closed - defer rows.Close() - return scanAll(rows, dest, false) -} - -// Get does a QueryRow using the provided Queryer, and scans the resulting row -// to dest. If dest is scannable, the result must only have one column. Otherwise, -// StructScan is used. Get will return sql.ErrNoRows like row.Scan would. -// Any placeholder parameters are replaced with supplied args. -// An error is returned if the result set is empty. -func Get(q Queryer, dest interface{}, query string, args ...interface{}) error { - r := q.QueryRowx(query, args...) - return r.scanAny(dest, false) -} - -// LoadFile exec's every statement in a file (as a single call to Exec). -// LoadFile may return a nil *sql.Result if errors are encountered locating or -// reading the file at path. LoadFile reads the entire file into memory, so it -// is not suitable for loading large data dumps, but can be useful for initializing -// schemas or loading indexes. -// -// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3 -// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting -// this by requiring something with DriverName() and then attempting to split the -// queries will be difficult to get right, and its current driver-specific behavior -// is deemed at least not complex in its incorrectness. -func LoadFile(e Execer, path string) (*sql.Result, error) { - realpath, err := filepath.Abs(path) - if err != nil { - return nil, err - } - contents, err := ioutil.ReadFile(realpath) - if err != nil { - return nil, err - } - res, err := e.Exec(string(contents)) - return &res, err -} - -// MustExec execs the query using e and panics if there was an error. -// Any placeholder parameters are replaced with supplied args. -func MustExec(e Execer, query string, args ...interface{}) sql.Result { - res, err := e.Exec(query, args...) - if err != nil { - panic(err) - } - return res -} - -// SliceScan using this Rows. -func (r *Row) SliceScan() ([]interface{}, error) { - return SliceScan(r) -} - -// MapScan using this Rows. 
-func (r *Row) MapScan(dest map[string]interface{}) error { - return MapScan(r, dest) -} - -func (r *Row) scanAny(dest interface{}, structOnly bool) error { - if r.err != nil { - return r.err - } - if r.rows == nil { - r.err = sql.ErrNoRows - return r.err - } - defer r.rows.Close() - - v := reflect.ValueOf(dest) - if v.Kind() != reflect.Ptr { - return errors.New("must pass a pointer, not a value, to StructScan destination") - } - if v.IsNil() { - return errors.New("nil pointer passed to StructScan destination") - } - - base := reflectx.Deref(v.Type()) - scannable := isScannable(base) - - if structOnly && scannable { - return structOnlyError(base) - } - - columns, err := r.Columns() - if err != nil { - return err - } - - if scannable && len(columns) > 1 { - return fmt.Errorf("scannable dest type %s with >1 columns (%d) in result", base.Kind(), len(columns)) - } - - if scannable { - return r.Scan(dest) - } - - m := r.Mapper - - fields := m.TraversalsByName(v.Type(), columns) - // if we are not unsafe and are missing fields, return an error - if f, err := missingFields(fields); err != nil && !r.unsafe { - return fmt.Errorf("missing destination name %s in %T", columns[f], dest) - } - values := make([]interface{}, len(columns)) - - err = fieldsByTraversal(v, fields, values, true) - if err != nil { - return err - } - // scan into the struct field pointers and append to our results - return r.Scan(values...) -} - -// StructScan a single Row into dest. -func (r *Row) StructScan(dest interface{}) error { - return r.scanAny(dest, true) -} - -// SliceScan a row, returning a []interface{} with values similar to MapScan. -// This function is primarily intended for use where the number of columns -// is not known. Because you can pass an []interface{} directly to Scan, -// it's recommended that you do that as it will not have to allocate new -// slices per row. -func SliceScan(r ColScanner) ([]interface{}, error) { - // ignore r.started, since we needn't use reflect for anything. - columns, err := r.Columns() - if err != nil { - return []interface{}{}, err - } - - values := make([]interface{}, len(columns)) - for i := range values { - values[i] = new(interface{}) - } - - err = r.Scan(values...) - - if err != nil { - return values, err - } - - for i := range columns { - values[i] = *(values[i].(*interface{})) - } - - return values, r.Err() -} - -// MapScan scans a single Row into the dest map[string]interface{}. -// Use this to get results for SQL that might not be under your control -// (for instance, if you're building an interface for an SQL server that -// executes SQL from input). Please do not use this as a primary interface! -// This will modify the map sent to it in place, so reuse the same map with -// care. Columns which occur more than once in the result will overwrite -// each other! -func MapScan(r ColScanner, dest map[string]interface{}) error { - // ignore r.started, since we needn't use reflect for anything. - columns, err := r.Columns() - if err != nil { - return err - } - - values := make([]interface{}, len(columns)) - for i := range values { - values[i] = new(interface{}) - } - - err = r.Scan(values...) 
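// MapScan and SliceScan above target result sets whose shape is unknown at
// compile time; every column is boxed into an interface{}. A short sketch,
// assuming "fmt" and the sqlx import from the first sketch:
func dumpRows(db *sqlx.DB, query string) error {
	rows, err := db.Queryx(query)
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		m := map[string]interface{}{}
		// Columns that occur more than once overwrite each other, as the
		// MapScan documentation warns.
		if err := rows.MapScan(m); err != nil {
			return err
		}
		fmt.Println(m)
	}
	return rows.Err()
}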
- if err != nil { - return err - } - - for i, column := range columns { - dest[column] = *(values[i].(*interface{})) - } - - return r.Err() -} - -type rowsi interface { - Close() error - Columns() ([]string, error) - Err() error - Next() bool - Scan(...interface{}) error -} - -// structOnlyError returns an error appropriate for type when a non-scannable -// struct is expected but something else is given -func structOnlyError(t reflect.Type) error { - isStruct := t.Kind() == reflect.Struct - isScanner := reflect.PtrTo(t).Implements(_scannerInterface) - if !isStruct { - return fmt.Errorf("expected %s but got %s", reflect.Struct, t.Kind()) - } - if isScanner { - return fmt.Errorf("structscan expects a struct dest but the provided struct type %s implements scanner", t.Name()) - } - return fmt.Errorf("expected a struct, but struct %s has no exported fields", t.Name()) -} - -// scanAll scans all rows into a destination, which must be a slice of any -// type. If the destination slice type is a Struct, then StructScan will be -// used on each row. If the destination is some other kind of base type, then -// each row must only have one column which can scan into that type. This -// allows you to do something like: -// -// rows, _ := db.Query("select id from people;") -// var ids []int -// scanAll(rows, &ids, false) -// -// and ids will be a list of the id results. I realize that this is a desirable -// interface to expose to users, but for now it will only be exposed via changes -// to `Get` and `Select`. The reason that this has been implemented like this is -// this is the only way to not duplicate reflect work in the new API while -// maintaining backwards compatibility. -func scanAll(rows rowsi, dest interface{}, structOnly bool) error { - var v, vp reflect.Value - - value := reflect.ValueOf(dest) - - // json.Unmarshal returns errors for these - if value.Kind() != reflect.Ptr { - return errors.New("must pass a pointer, not a value, to StructScan destination") - } - if value.IsNil() { - return errors.New("nil pointer passed to StructScan destination") - } - direct := reflect.Indirect(value) - - slice, err := baseType(value.Type(), reflect.Slice) - if err != nil { - return err - } - - isPtr := slice.Elem().Kind() == reflect.Ptr - base := reflectx.Deref(slice.Elem()) - scannable := isScannable(base) - - if structOnly && scannable { - return structOnlyError(base) - } - - columns, err := rows.Columns() - if err != nil { - return err - } - - // if it's a base type make sure it only has 1 column; if not return an error - if scannable && len(columns) > 1 { - return fmt.Errorf("non-struct dest type %s with >1 columns (%d)", base.Kind(), len(columns)) - } - - if !scannable { - var values []interface{} - var m *reflectx.Mapper - - switch rows.(type) { - case *Rows: - m = rows.(*Rows).Mapper - default: - m = mapper() - } - - fields := m.TraversalsByName(base, columns) - // if we are not unsafe and are missing fields, return an error - if f, err := missingFields(fields); err != nil && !isUnsafe(rows) { - return fmt.Errorf("missing destination name %s in %T", columns[f], dest) - } - values = make([]interface{}, len(columns)) - - for rows.Next() { - // create a new struct type (which returns PtrTo) and indirect it - vp = reflect.New(base) - v = reflect.Indirect(vp) - - err = fieldsByTraversal(v, fields, values, true) - if err != nil { - return err - } - - // scan into the struct field pointers and append to our results - err = rows.Scan(values...) 
- if err != nil { - return err - } - - if isPtr { - direct.Set(reflect.Append(direct, vp)) - } else { - direct.Set(reflect.Append(direct, v)) - } - } - } else { - for rows.Next() { - vp = reflect.New(base) - err = rows.Scan(vp.Interface()) - if err != nil { - return err - } - // append - if isPtr { - direct.Set(reflect.Append(direct, vp)) - } else { - direct.Set(reflect.Append(direct, reflect.Indirect(vp))) - } - } - } - - return rows.Err() -} - -// FIXME: StructScan was the very first bit of API in sqlx, and now unfortunately -// it doesn't really feel like it's named properly. There is an incongruency -// between this and the way that StructScan (which might better be ScanStruct -// anyway) works on a rows object. - -// StructScan all rows from an sql.Rows or an sqlx.Rows into the dest slice. -// StructScan will scan in the entire rows result, so if you do not want to -// allocate structs for the entire result, use Queryx and see sqlx.Rows.StructScan. -// If rows is sqlx.Rows, it will use its mapper, otherwise it will use the default. -func StructScan(rows rowsi, dest interface{}) error { - return scanAll(rows, dest, true) - -} - -// reflect helpers - -func baseType(t reflect.Type, expected reflect.Kind) (reflect.Type, error) { - t = reflectx.Deref(t) - if t.Kind() != expected { - return nil, fmt.Errorf("expected %s but got %s", expected, t.Kind()) - } - return t, nil -} - -// fieldsByName fills a values interface with fields from the passed value based -// on the traversals in int. If ptrs is true, return addresses instead of values. -// We write this instead of using FieldsByName to save allocations and map lookups -// when iterating over many rows. Empty traversals will get an interface pointer. -// Because of the necessity of requesting ptrs or values, it's considered a bit too -// specialized for inclusion in reflectx itself. -func fieldsByTraversal(v reflect.Value, traversals [][]int, values []interface{}, ptrs bool) error { - v = reflect.Indirect(v) - if v.Kind() != reflect.Struct { - return errors.New("argument not a struct") - } - - for i, traversal := range traversals { - if len(traversal) == 0 { - values[i] = new(interface{}) - continue - } - f := reflectx.FieldByIndexes(v, traversal) - if ptrs { - values[i] = f.Addr().Interface() - } else { - values[i] = f.Interface() - } - } - return nil -} - -func missingFields(transversals [][]int) (field int, err error) { - for i, t := range transversals { - if len(t) == 0 { - return i, errors.New("missing field") - } - } - return 0, nil -} diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_context.go b/vendor/github.com/jmoiron/sqlx/sqlx_context.go deleted file mode 100644 index 0603311..0000000 --- a/vendor/github.com/jmoiron/sqlx/sqlx_context.go +++ /dev/null @@ -1,346 +0,0 @@ -// +build go1.8 - -package sqlx - -import ( - "context" - "database/sql" - "fmt" - "io/ioutil" - "path/filepath" - "reflect" -) - -// ConnectContext to a database and verify with a ping. 
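// From here down the same API is repeated with a context.Context threaded
// through; semantics are otherwise unchanged. A sketch of the context-aware
// calls defined below, assuming "context", "time" and the Person type from
// the first sketch (query and timeout are placeholders):
func findPerson(ctx context.Context, db *sqlx.DB, id int) (*Person, error) {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	var p Person
	// Like Get, GetContext surfaces sql.ErrNoRows on an empty result set.
	if err := db.GetContext(ctx, &p, `SELECT id, name FROM person WHERE id = $1`, id); err != nil {
		return nil, err
	}
	return &p, nil
}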
-func ConnectContext(ctx context.Context, driverName, dataSourceName string) (*DB, error) { - db, err := Open(driverName, dataSourceName) - if err != nil { - return db, err - } - err = db.PingContext(ctx) - return db, err -} - -// QueryerContext is an interface used by GetContext and SelectContext -type QueryerContext interface { - QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) - QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) - QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row -} - -// PreparerContext is an interface used by PreparexContext. -type PreparerContext interface { - PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) -} - -// ExecerContext is an interface used by MustExecContext and LoadFileContext -type ExecerContext interface { - ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) -} - -// ExtContext is a union interface which can bind, query, and exec, with Context -// used by NamedQueryContext and NamedExecContext. -type ExtContext interface { - binder - QueryerContext - ExecerContext -} - -// SelectContext executes a query using the provided Queryer, and StructScans -// each row into dest, which must be a slice. If the slice elements are -// scannable, then the result set must have only one column. Otherwise, -// StructScan is used. The *sql.Rows are closed automatically. -// Any placeholder parameters are replaced with supplied args. -func SelectContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error { - rows, err := q.QueryxContext(ctx, query, args...) - if err != nil { - return err - } - // if something happens here, we want to make sure the rows are Closed - defer rows.Close() - return scanAll(rows, dest, false) -} - -// PreparexContext prepares a statement. -// -// The provided context is used for the preparation of the statement, not for -// the execution of the statement. -func PreparexContext(ctx context.Context, p PreparerContext, query string) (*Stmt, error) { - s, err := p.PrepareContext(ctx, query) - if err != nil { - return nil, err - } - return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err -} - -// GetContext does a QueryRow using the provided Queryer, and scans the -// resulting row to dest. If dest is scannable, the result must only have one -// column. Otherwise, StructScan is used. Get will return sql.ErrNoRows like -// row.Scan would. Any placeholder parameters are replaced with supplied args. -// An error is returned if the result set is empty. -func GetContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error { - r := q.QueryRowxContext(ctx, query, args...) - return r.scanAny(dest, false) -} - -// LoadFileContext exec's every statement in a file (as a single call to Exec). -// LoadFileContext may return a nil *sql.Result if errors are encountered -// locating or reading the file at path. LoadFile reads the entire file into -// memory, so it is not suitable for loading large data dumps, but can be useful -// for initializing schemas or loading indexes. -// -// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3 -// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. 
Detecting -// this by requiring something with DriverName() and then attempting to split the -// queries will be difficult to get right, and its current driver-specific behavior -// is deemed at least not complex in its incorrectness. -func LoadFileContext(ctx context.Context, e ExecerContext, path string) (*sql.Result, error) { - realpath, err := filepath.Abs(path) - if err != nil { - return nil, err - } - contents, err := ioutil.ReadFile(realpath) - if err != nil { - return nil, err - } - res, err := e.ExecContext(ctx, string(contents)) - return &res, err -} - -// MustExecContext execs the query using e and panics if there was an error. -// Any placeholder parameters are replaced with supplied args. -func MustExecContext(ctx context.Context, e ExecerContext, query string, args ...interface{}) sql.Result { - res, err := e.ExecContext(ctx, query, args...) - if err != nil { - panic(err) - } - return res -} - -// PrepareNamedContext returns an sqlx.NamedStmt -func (db *DB) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) { - return prepareNamedContext(ctx, db, query) -} - -// NamedQueryContext using this DB. -// Any named placeholder parameters are replaced with fields from arg. -func (db *DB) NamedQueryContext(ctx context.Context, query string, arg interface{}) (*Rows, error) { - return NamedQueryContext(ctx, db, query, arg) -} - -// NamedExecContext using this DB. -// Any named placeholder parameters are replaced with fields from arg. -func (db *DB) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) { - return NamedExecContext(ctx, db, query, arg) -} - -// SelectContext using this DB. -// Any placeholder parameters are replaced with supplied args. -func (db *DB) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { - return SelectContext(ctx, db, dest, query, args...) -} - -// GetContext using this DB. -// Any placeholder parameters are replaced with supplied args. -// An error is returned if the result set is empty. -func (db *DB) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { - return GetContext(ctx, db, dest, query, args...) -} - -// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. -// -// The provided context is used for the preparation of the statement, not for -// the execution of the statement. -func (db *DB) PreparexContext(ctx context.Context, query string) (*Stmt, error) { - return PreparexContext(ctx, db, query) -} - -// QueryxContext queries the database and returns an *sqlx.Rows. -// Any placeholder parameters are replaced with supplied args. -func (db *DB) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { - r, err := db.DB.QueryContext(ctx, query, args...) - if err != nil { - return nil, err - } - return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err -} - -// QueryRowxContext queries the database and returns an *sqlx.Row. -// Any placeholder parameters are replaced with supplied args. -func (db *DB) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { - rows, err := db.DB.QueryContext(ctx, query, args...) - return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper} -} - -// MustBeginTx starts a transaction, and panics on error. Returns an *sqlx.Tx instead -// of an *sql.Tx. -// -// The provided context is used until the transaction is committed or rolled -// back. 
If the context is canceled, the sql package will roll back the -// transaction. Tx.Commit will return an error if the context provided to -// MustBeginContext is canceled. -func (db *DB) MustBeginTx(ctx context.Context, opts *sql.TxOptions) *Tx { - tx, err := db.BeginTxx(ctx, opts) - if err != nil { - panic(err) - } - return tx -} - -// MustExecContext (panic) runs MustExec using this database. -// Any placeholder parameters are replaced with supplied args. -func (db *DB) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result { - return MustExecContext(ctx, db, query, args...) -} - -// BeginTxx begins a transaction and returns an *sqlx.Tx instead of an -// *sql.Tx. -// -// The provided context is used until the transaction is committed or rolled -// back. If the context is canceled, the sql package will roll back the -// transaction. Tx.Commit will return an error if the context provided to -// BeginxContext is canceled. -func (db *DB) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { - tx, err := db.DB.BeginTx(ctx, opts) - if err != nil { - return nil, err - } - return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err -} - -// StmtxContext returns a version of the prepared statement which runs within a -// transaction. Provided stmt can be either *sql.Stmt or *sqlx.Stmt. -func (tx *Tx) StmtxContext(ctx context.Context, stmt interface{}) *Stmt { - var s *sql.Stmt - switch v := stmt.(type) { - case Stmt: - s = v.Stmt - case *Stmt: - s = v.Stmt - case *sql.Stmt: - s = v - default: - panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type())) - } - return &Stmt{Stmt: tx.StmtContext(ctx, s), Mapper: tx.Mapper} -} - -// NamedStmtContext returns a version of the prepared statement which runs -// within a transaction. -func (tx *Tx) NamedStmtContext(ctx context.Context, stmt *NamedStmt) *NamedStmt { - return &NamedStmt{ - QueryString: stmt.QueryString, - Params: stmt.Params, - Stmt: tx.StmtxContext(ctx, stmt.Stmt), - } -} - -// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. -// -// The provided context is used for the preparation of the statement, not for -// the execution of the statement. -func (tx *Tx) PreparexContext(ctx context.Context, query string) (*Stmt, error) { - return PreparexContext(ctx, tx, query) -} - -// PrepareNamedContext returns an sqlx.NamedStmt -func (tx *Tx) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) { - return prepareNamedContext(ctx, tx, query) -} - -// MustExecContext runs MustExecContext within a transaction. -// Any placeholder parameters are replaced with supplied args. -func (tx *Tx) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result { - return MustExecContext(ctx, tx, query, args...) -} - -// QueryxContext within a transaction and context. -// Any placeholder parameters are replaced with supplied args. -func (tx *Tx) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { - r, err := tx.Tx.QueryContext(ctx, query, args...) - if err != nil { - return nil, err - } - return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err -} - -// SelectContext within a transaction and context. -// Any placeholder parameters are replaced with supplied args. -func (tx *Tx) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { - return SelectContext(ctx, tx, dest, query, args...) 
-} - -// GetContext within a transaction and context. -// Any placeholder parameters are replaced with supplied args. -// An error is returned if the result set is empty. -func (tx *Tx) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { - return GetContext(ctx, tx, dest, query, args...) -} - -// QueryRowxContext within a transaction and context. -// Any placeholder parameters are replaced with supplied args. -func (tx *Tx) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { - rows, err := tx.Tx.QueryContext(ctx, query, args...) - return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper} -} - -// NamedExecContext using this Tx. -// Any named placeholder parameters are replaced with fields from arg. -func (tx *Tx) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) { - return NamedExecContext(ctx, tx, query, arg) -} - -// SelectContext using the prepared statement. -// Any placeholder parameters are replaced with supplied args. -func (s *Stmt) SelectContext(ctx context.Context, dest interface{}, args ...interface{}) error { - return SelectContext(ctx, &qStmt{s}, dest, "", args...) -} - -// GetContext using the prepared statement. -// Any placeholder parameters are replaced with supplied args. -// An error is returned if the result set is empty. -func (s *Stmt) GetContext(ctx context.Context, dest interface{}, args ...interface{}) error { - return GetContext(ctx, &qStmt{s}, dest, "", args...) -} - -// MustExecContext (panic) using this statement. Note that the query portion of -// the error output will be blank, as Stmt does not expose its query. -// Any placeholder parameters are replaced with supplied args. -func (s *Stmt) MustExecContext(ctx context.Context, args ...interface{}) sql.Result { - return MustExecContext(ctx, &qStmt{s}, "", args...) -} - -// QueryRowxContext using this statement. -// Any placeholder parameters are replaced with supplied args. -func (s *Stmt) QueryRowxContext(ctx context.Context, args ...interface{}) *Row { - qs := &qStmt{s} - return qs.QueryRowxContext(ctx, "", args...) -} - -// QueryxContext using this statement. -// Any placeholder parameters are replaced with supplied args. -func (s *Stmt) QueryxContext(ctx context.Context, args ...interface{}) (*Rows, error) { - qs := &qStmt{s} - return qs.QueryxContext(ctx, "", args...) -} - -func (q *qStmt) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) { - return q.Stmt.QueryContext(ctx, args...) -} - -func (q *qStmt) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { - r, err := q.Stmt.QueryContext(ctx, args...) - if err != nil { - return nil, err - } - return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err -} - -func (q *qStmt) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { - rows, err := q.Stmt.QueryContext(ctx, args...) - return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper} -} - -func (q *qStmt) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { - return q.Stmt.ExecContext(ctx, args...) -} diff --git a/vendor/github.com/syndtr/goleveldb/LICENSE b/vendor/github.com/syndtr/goleveldb/LICENSE new file mode 100644 index 0000000..4a772d1 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/LICENSE @@ -0,0 +1,24 @@ +Copyright 2012 Suryandaru Triandana +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/batch.go b/vendor/github.com/syndtr/goleveldb/leveldb/batch.go new file mode 100644 index 0000000..2259200 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/batch.go @@ -0,0 +1,349 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "encoding/binary" + "fmt" + "io" + + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/memdb" + "github.com/syndtr/goleveldb/leveldb/storage" +) + +// ErrBatchCorrupted records reason of batch corruption. This error will be +// wrapped with errors.ErrCorrupted. +type ErrBatchCorrupted struct { + Reason string +} + +func (e *ErrBatchCorrupted) Error() string { + return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason) +} + +func newErrBatchCorrupted(reason string) error { + return errors.NewErrCorrupted(storage.FileDesc{}, &ErrBatchCorrupted{reason}) +} + +const ( + batchHeaderLen = 8 + 4 + batchGrowRec = 3000 + batchBufioSize = 16 +) + +// BatchReplay wraps basic batch operations. +type BatchReplay interface { + Put(key, value []byte) + Delete(key []byte) +} + +type batchIndex struct { + keyType keyType + keyPos, keyLen int + valuePos, valueLen int +} + +func (index batchIndex) k(data []byte) []byte { + return data[index.keyPos : index.keyPos+index.keyLen] +} + +func (index batchIndex) v(data []byte) []byte { + if index.valueLen != 0 { + return data[index.valuePos : index.valuePos+index.valueLen] + } + return nil +} + +func (index batchIndex) kv(data []byte) (key, value []byte) { + return index.k(data), index.v(data) +} + +// Batch is a write batch. +type Batch struct { + data []byte + index []batchIndex + + // internalLen is sums of key/value pair length plus 8-bytes internal key. 
+ internalLen int +} + +func (b *Batch) grow(n int) { + o := len(b.data) + if cap(b.data)-o < n { + div := 1 + if len(b.index) > batchGrowRec { + div = len(b.index) / batchGrowRec + } + ndata := make([]byte, o, o+n+o/div) + copy(ndata, b.data) + b.data = ndata + } +} + +func (b *Batch) appendRec(kt keyType, key, value []byte) { + n := 1 + binary.MaxVarintLen32 + len(key) + if kt == keyTypeVal { + n += binary.MaxVarintLen32 + len(value) + } + b.grow(n) + index := batchIndex{keyType: kt} + o := len(b.data) + data := b.data[:o+n] + data[o] = byte(kt) + o++ + o += binary.PutUvarint(data[o:], uint64(len(key))) + index.keyPos = o + index.keyLen = len(key) + o += copy(data[o:], key) + if kt == keyTypeVal { + o += binary.PutUvarint(data[o:], uint64(len(value))) + index.valuePos = o + index.valueLen = len(value) + o += copy(data[o:], value) + } + b.data = data[:o] + b.index = append(b.index, index) + b.internalLen += index.keyLen + index.valueLen + 8 +} + +// Put appends 'put operation' of the given key/value pair to the batch. +// It is safe to modify the contents of the argument after Put returns but not +// before. +func (b *Batch) Put(key, value []byte) { + b.appendRec(keyTypeVal, key, value) +} + +// Delete appends 'delete operation' of the given key to the batch. +// It is safe to modify the contents of the argument after Delete returns but +// not before. +func (b *Batch) Delete(key []byte) { + b.appendRec(keyTypeDel, key, nil) +} + +// Dump dumps batch contents. The returned slice can be loaded into the +// batch using Load method. +// The returned slice is not its own copy, so the contents should not be +// modified. +func (b *Batch) Dump() []byte { + return b.data +} + +// Load loads given slice into the batch. Previous contents of the batch +// will be discarded. +// The given slice will not be copied and will be used as batch buffer, so +// it is not safe to modify the contents of the slice. +func (b *Batch) Load(data []byte) error { + return b.decode(data, -1) +} + +// Replay replays batch contents. +func (b *Batch) Replay(r BatchReplay) error { + for _, index := range b.index { + switch index.keyType { + case keyTypeVal: + r.Put(index.k(b.data), index.v(b.data)) + case keyTypeDel: + r.Delete(index.k(b.data)) + } + } + return nil +} + +// Len returns number of records in the batch. +func (b *Batch) Len() int { + return len(b.index) +} + +// Reset resets the batch. +func (b *Batch) Reset() { + b.data = b.data[:0] + b.index = b.index[:0] + b.internalLen = 0 +} + +func (b *Batch) replayInternal(fn func(i int, kt keyType, k, v []byte) error) error { + for i, index := range b.index { + if err := fn(i, index.keyType, index.k(b.data), index.v(b.data)); err != nil { + return err + } + } + return nil +} + +func (b *Batch) append(p *Batch) { + ob := len(b.data) + oi := len(b.index) + b.data = append(b.data, p.data...) + b.index = append(b.index, p.index...) + b.internalLen += p.internalLen + + // Updating index offset. 
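// A minimal sketch of the Batch API above; keys, values and the
// *leveldb.DB are placeholders, and "fmt" plus the top-level leveldb
// import are assumed. Write applies a whole batch atomically.
type replayPrinter struct{}

func (replayPrinter) Put(key, value []byte) { fmt.Printf("put %q=%q\n", key, value) }
func (replayPrinter) Delete(key []byte)     { fmt.Printf("del %q\n", key) }

func demoBatch(db *leveldb.DB) error {
	b := new(leveldb.Batch)
	b.Put([]byte("k1"), []byte("v1"))
	b.Put([]byte("k2"), []byte("v2"))
	b.Delete([]byte("k1"))
	fmt.Println(b.Len()) // 3 records appended, not 2 distinct keys
	// Replay walks the encoded records without touching any database.
	if err := b.Replay(replayPrinter{}); err != nil {
		return err
	}
	return db.Write(b, nil)
}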
+ if ob != 0 { + for ; oi < len(b.index); oi++ { + index := &b.index[oi] + index.keyPos += ob + if index.valueLen != 0 { + index.valuePos += ob + } + } + } +} + +func (b *Batch) decode(data []byte, expectedLen int) error { + b.data = data + b.index = b.index[:0] + b.internalLen = 0 + err := decodeBatch(data, func(i int, index batchIndex) error { + b.index = append(b.index, index) + b.internalLen += index.keyLen + index.valueLen + 8 + return nil + }) + if err != nil { + return err + } + if expectedLen >= 0 && len(b.index) != expectedLen { + return newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", expectedLen, len(b.index))) + } + return nil +} + +func (b *Batch) putMem(seq uint64, mdb *memdb.DB) error { + var ik []byte + for i, index := range b.index { + ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType) + if err := mdb.Put(ik, index.v(b.data)); err != nil { + return err + } + } + return nil +} + +func (b *Batch) revertMem(seq uint64, mdb *memdb.DB) error { + var ik []byte + for i, index := range b.index { + ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType) + if err := mdb.Delete(ik); err != nil { + return err + } + } + return nil +} + +func newBatch() interface{} { + return &Batch{} +} + +func decodeBatch(data []byte, fn func(i int, index batchIndex) error) error { + var index batchIndex + for i, o := 0, 0; o < len(data); i++ { + // Key type. + index.keyType = keyType(data[o]) + if index.keyType > keyTypeVal { + return newErrBatchCorrupted(fmt.Sprintf("bad record: invalid type %#x", uint(index.keyType))) + } + o++ + + // Key. + x, n := binary.Uvarint(data[o:]) + o += n + if n <= 0 || o+int(x) > len(data) { + return newErrBatchCorrupted("bad record: invalid key length") + } + index.keyPos = o + index.keyLen = int(x) + o += index.keyLen + + // Value. 
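// For reference, each record being parsed here was laid out by appendRec as:
//
//   keyType (1 byte) | uvarint(len(key)) | key bytes
//   ...followed, for keyTypeVal only, by: uvarint(len(value)) | value bytes
//
// so Put("a", "b") encodes to the five bytes 0x01 0x01 'a' 0x01 'b' and
// Delete("a") to 0x00 0x01 'a' (keyTypeDel = 0, keyTypeVal = 1 per key.go).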
+ if index.keyType == keyTypeVal { + x, n = binary.Uvarint(data[o:]) + o += n + if n <= 0 || o+int(x) > len(data) { + return newErrBatchCorrupted("bad record: invalid value length") + } + index.valuePos = o + index.valueLen = int(x) + o += index.valueLen + } else { + index.valuePos = 0 + index.valueLen = 0 + } + + if err := fn(i, index); err != nil { + return err + } + } + return nil +} + +func decodeBatchToMem(data []byte, expectSeq uint64, mdb *memdb.DB) (seq uint64, batchLen int, err error) { + seq, batchLen, err = decodeBatchHeader(data) + if err != nil { + return 0, 0, err + } + if seq < expectSeq { + return 0, 0, newErrBatchCorrupted("invalid sequence number") + } + data = data[batchHeaderLen:] + var ik []byte + var decodedLen int + err = decodeBatch(data, func(i int, index batchIndex) error { + if i >= batchLen { + return newErrBatchCorrupted("invalid records length") + } + ik = makeInternalKey(ik, index.k(data), seq+uint64(i), index.keyType) + if err := mdb.Put(ik, index.v(data)); err != nil { + return err + } + decodedLen++ + return nil + }) + if err == nil && decodedLen != batchLen { + err = newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", batchLen, decodedLen)) + } + return +} + +func encodeBatchHeader(dst []byte, seq uint64, batchLen int) []byte { + dst = ensureBuffer(dst, batchHeaderLen) + binary.LittleEndian.PutUint64(dst, seq) + binary.LittleEndian.PutUint32(dst[8:], uint32(batchLen)) + return dst +} + +func decodeBatchHeader(data []byte) (seq uint64, batchLen int, err error) { + if len(data) < batchHeaderLen { + return 0, 0, newErrBatchCorrupted("too short") + } + + seq = binary.LittleEndian.Uint64(data) + batchLen = int(binary.LittleEndian.Uint32(data[8:])) + if batchLen < 0 { + return 0, 0, newErrBatchCorrupted("invalid records length") + } + return +} + +func batchesLen(batches []*Batch) int { + batchLen := 0 + for _, batch := range batches { + batchLen += batch.Len() + } + return batchLen +} + +func writeBatchesWithHeader(wr io.Writer, batches []*Batch, seq uint64) error { + if _, err := wr.Write(encodeBatchHeader(nil, seq, batchesLen(batches))); err != nil { + return err + } + for _, batch := range batches { + if _, err := wr.Write(batch.data); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go new file mode 100644 index 0000000..c36ad32 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go @@ -0,0 +1,704 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package cache provides interface and implementation of a cache algorithms. +package cache + +import ( + "sync" + "sync/atomic" + "unsafe" + + "github.com/syndtr/goleveldb/leveldb/util" +) + +// Cacher provides interface to implements a caching functionality. +// An implementation must be safe for concurrent use. +type Cacher interface { + // Capacity returns cache capacity. + Capacity() int + + // SetCapacity sets cache capacity. + SetCapacity(capacity int) + + // Promote promotes the 'cache node'. + Promote(n *Node) + + // Ban evicts the 'cache node' and prevent subsequent 'promote'. + Ban(n *Node) + + // Evict evicts the 'cache node'. + Evict(n *Node) + + // EvictNS evicts 'cache node' with the given namespace. + EvictNS(ns uint64) + + // EvictAll evicts all 'cache node'. 
+ EvictAll() + + // Close closes the 'cache tree' + Close() error +} + +// Value is a 'cacheable object'. It may implements util.Releaser, if +// so the the Release method will be called once object is released. +type Value interface{} + +// NamespaceGetter provides convenient wrapper for namespace. +type NamespaceGetter struct { + Cache *Cache + NS uint64 +} + +// Get simply calls Cache.Get() method. +func (g *NamespaceGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle { + return g.Cache.Get(g.NS, key, setFunc) +} + +// The hash tables implementation is based on: +// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, +// Kunlong Zhang, and Michael Spear. +// ACM Symposium on Principles of Distributed Computing, Jul 2014. + +const ( + mInitialSize = 1 << 4 + mOverflowThreshold = 1 << 5 + mOverflowGrowThreshold = 1 << 7 +) + +type mBucket struct { + mu sync.Mutex + node []*Node + frozen bool +} + +func (b *mBucket) freeze() []*Node { + b.mu.Lock() + defer b.mu.Unlock() + if !b.frozen { + b.frozen = true + } + return b.node +} + +func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) { + b.mu.Lock() + + if b.frozen { + b.mu.Unlock() + return + } + + // Scan the node. + for _, n := range b.node { + if n.hash == hash && n.ns == ns && n.key == key { + atomic.AddInt32(&n.ref, 1) + b.mu.Unlock() + return true, false, n + } + } + + // Get only. + if noset { + b.mu.Unlock() + return true, false, nil + } + + // Create node. + n = &Node{ + r: r, + hash: hash, + ns: ns, + key: key, + ref: 1, + } + // Add node to bucket. + b.node = append(b.node, n) + bLen := len(b.node) + b.mu.Unlock() + + // Update counter. + grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold + if bLen > mOverflowThreshold { + grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold + } + + // Grow. + if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) { + nhLen := len(h.buckets) << 1 + nh := &mNode{ + buckets: make([]unsafe.Pointer, nhLen), + mask: uint32(nhLen) - 1, + pred: unsafe.Pointer(h), + growThreshold: int32(nhLen * mOverflowThreshold), + shrinkThreshold: int32(nhLen >> 1), + } + ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh)) + if !ok { + panic("BUG: failed swapping head") + } + go nh.initBuckets() + } + + return true, true, n +} + +func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) { + b.mu.Lock() + + if b.frozen { + b.mu.Unlock() + return + } + + // Scan the node. + var ( + n *Node + bLen int + ) + for i := range b.node { + n = b.node[i] + if n.ns == ns && n.key == key { + if atomic.LoadInt32(&n.ref) == 0 { + deleted = true + + // Call releaser. + if n.value != nil { + if r, ok := n.value.(util.Releaser); ok { + r.Release() + } + n.value = nil + } + + // Remove node from bucket. + b.node = append(b.node[:i], b.node[i+1:]...) + bLen = len(b.node) + } + break + } + } + b.mu.Unlock() + + if deleted { + // Call OnDel. + for _, f := range n.onDel { + f() + } + + // Update counter. + atomic.AddInt32(&r.size, int32(n.size)*-1) + shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold + if bLen >= mOverflowThreshold { + atomic.AddInt32(&h.overflow, -1) + } + + // Shrink. 
+ if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) { + nhLen := len(h.buckets) >> 1 + nh := &mNode{ + buckets: make([]unsafe.Pointer, nhLen), + mask: uint32(nhLen) - 1, + pred: unsafe.Pointer(h), + growThreshold: int32(nhLen * mOverflowThreshold), + shrinkThreshold: int32(nhLen >> 1), + } + ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh)) + if !ok { + panic("BUG: failed swapping head") + } + go nh.initBuckets() + } + } + + return true, deleted +} + +type mNode struct { + buckets []unsafe.Pointer // []*mBucket + mask uint32 + pred unsafe.Pointer // *mNode + resizeInProgess int32 + + overflow int32 + growThreshold int32 + shrinkThreshold int32 +} + +func (n *mNode) initBucket(i uint32) *mBucket { + if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil { + return b + } + + p := (*mNode)(atomic.LoadPointer(&n.pred)) + if p != nil { + var node []*Node + if n.mask > p.mask { + // Grow. + pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask])) + if pb == nil { + pb = p.initBucket(i & p.mask) + } + m := pb.freeze() + // Split nodes. + for _, x := range m { + if x.hash&n.mask == i { + node = append(node, x) + } + } + } else { + // Shrink. + pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i])) + if pb0 == nil { + pb0 = p.initBucket(i) + } + pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))])) + if pb1 == nil { + pb1 = p.initBucket(i + uint32(len(n.buckets))) + } + m0 := pb0.freeze() + m1 := pb1.freeze() + // Merge nodes. + node = make([]*Node, 0, len(m0)+len(m1)) + node = append(node, m0...) + node = append(node, m1...) + } + b := &mBucket{node: node} + if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) { + if len(node) > mOverflowThreshold { + atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold)) + } + return b + } + } + + return (*mBucket)(atomic.LoadPointer(&n.buckets[i])) +} + +func (n *mNode) initBuckets() { + for i := range n.buckets { + n.initBucket(uint32(i)) + } + atomic.StorePointer(&n.pred, nil) +} + +// Cache is a 'cache map'. +type Cache struct { + mu sync.RWMutex + mHead unsafe.Pointer // *mNode + nodes int32 + size int32 + cacher Cacher + closed bool +} + +// NewCache creates a new 'cache map'. The cacher is optional and +// may be nil. +func NewCache(cacher Cacher) *Cache { + h := &mNode{ + buckets: make([]unsafe.Pointer, mInitialSize), + mask: mInitialSize - 1, + growThreshold: int32(mInitialSize * mOverflowThreshold), + shrinkThreshold: 0, + } + for i := range h.buckets { + h.buckets[i] = unsafe.Pointer(&mBucket{}) + } + r := &Cache{ + mHead: unsafe.Pointer(h), + cacher: cacher, + } + return r +} + +func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) { + h := (*mNode)(atomic.LoadPointer(&r.mHead)) + i := hash & h.mask + b := (*mBucket)(atomic.LoadPointer(&h.buckets[i])) + if b == nil { + b = h.initBucket(i) + } + return h, b +} + +func (r *Cache) delete(n *Node) bool { + for { + h, b := r.getBucket(n.hash) + done, deleted := b.delete(r, h, n.hash, n.ns, n.key) + if done { + return deleted + } + } +} + +// Nodes returns number of 'cache node' in the map. +func (r *Cache) Nodes() int { + return int(atomic.LoadInt32(&r.nodes)) +} + +// Size returns sums of 'cache node' size in the map. +func (r *Cache) Size() int { + return int(atomic.LoadInt32(&r.size)) +} + +// Capacity returns cache capacity. 
+func (r *Cache) Capacity() int { + if r.cacher == nil { + return 0 + } + return r.cacher.Capacity() +} + +// SetCapacity sets cache capacity. +func (r *Cache) SetCapacity(capacity int) { + if r.cacher != nil { + r.cacher.SetCapacity(capacity) + } +} + +// Get gets 'cache node' with the given namespace and key. +// If cache node is not found and setFunc is not nil, Get will atomically creates +// the 'cache node' by calling setFunc. Otherwise Get will returns nil. +// +// The returned 'cache handle' should be released after use by calling Release +// method. +func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle { + r.mu.RLock() + defer r.mu.RUnlock() + if r.closed { + return nil + } + + hash := murmur32(ns, key, 0xf00) + for { + h, b := r.getBucket(hash) + done, _, n := b.get(r, h, hash, ns, key, setFunc == nil) + if done { + if n != nil { + n.mu.Lock() + if n.value == nil { + if setFunc == nil { + n.mu.Unlock() + n.unref() + return nil + } + + n.size, n.value = setFunc() + if n.value == nil { + n.size = 0 + n.mu.Unlock() + n.unref() + return nil + } + atomic.AddInt32(&r.size, int32(n.size)) + } + n.mu.Unlock() + if r.cacher != nil { + r.cacher.Promote(n) + } + return &Handle{unsafe.Pointer(n)} + } + + break + } + } + return nil +} + +// Delete removes and ban 'cache node' with the given namespace and key. +// A banned 'cache node' will never inserted into the 'cache tree'. Ban +// only attributed to the particular 'cache node', so when a 'cache node' +// is recreated it will not be banned. +// +// If onDel is not nil, then it will be executed if such 'cache node' +// doesn't exist or once the 'cache node' is released. +// +// Delete return true is such 'cache node' exist. +func (r *Cache) Delete(ns, key uint64, onDel func()) bool { + r.mu.RLock() + defer r.mu.RUnlock() + if r.closed { + return false + } + + hash := murmur32(ns, key, 0xf00) + for { + h, b := r.getBucket(hash) + done, _, n := b.get(r, h, hash, ns, key, true) + if done { + if n != nil { + if onDel != nil { + n.mu.Lock() + n.onDel = append(n.onDel, onDel) + n.mu.Unlock() + } + if r.cacher != nil { + r.cacher.Ban(n) + } + n.unref() + return true + } + + break + } + } + + if onDel != nil { + onDel() + } + + return false +} + +// Evict evicts 'cache node' with the given namespace and key. This will +// simply call Cacher.Evict. +// +// Evict return true is such 'cache node' exist. +func (r *Cache) Evict(ns, key uint64) bool { + r.mu.RLock() + defer r.mu.RUnlock() + if r.closed { + return false + } + + hash := murmur32(ns, key, 0xf00) + for { + h, b := r.getBucket(hash) + done, _, n := b.get(r, h, hash, ns, key, true) + if done { + if n != nil { + if r.cacher != nil { + r.cacher.Evict(n) + } + n.unref() + return true + } + + break + } + } + + return false +} + +// EvictNS evicts 'cache node' with the given namespace. This will +// simply call Cacher.EvictNS. +func (r *Cache) EvictNS(ns uint64) { + r.mu.RLock() + defer r.mu.RUnlock() + if r.closed { + return + } + + if r.cacher != nil { + r.cacher.EvictNS(ns) + } +} + +// EvictAll evicts all 'cache node'. This will simply call Cacher.EvictAll. +func (r *Cache) EvictAll() { + r.mu.RLock() + defer r.mu.RUnlock() + if r.closed { + return + } + + if r.cacher != nil { + r.cacher.EvictAll() + } +} + +// Close closes the 'cache map' and forcefully releases all 'cache node'. 
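// Get's contract above — setFunc runs at most once per live node, and every
// returned handle must be released — in a short sketch. The capacity,
// namespace, key and value are placeholders; NewLRU is the constructor from
// lru.go further down, and the cache package import is assumed.
func demoCache() {
	c := cache.NewCache(cache.NewLRU(16 << 20)) // cacher with a 16 MiB budget
	h := c.Get(1, 42, func() (size int, value cache.Value) {
		return 128, "expensive value" // size feeds the LRU accounting
	})
	if h != nil {
		_ = h.Value()
		h.Release() // an unreleased handle pins the node in the cache
	}
	_ = c.Close()
}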
+func (r *Cache) Close() error { + r.mu.Lock() + if !r.closed { + r.closed = true + + h := (*mNode)(r.mHead) + h.initBuckets() + + for i := range h.buckets { + b := (*mBucket)(h.buckets[i]) + for _, n := range b.node { + // Call releaser. + if n.value != nil { + if r, ok := n.value.(util.Releaser); ok { + r.Release() + } + n.value = nil + } + + // Call OnDel. + for _, f := range n.onDel { + f() + } + n.onDel = nil + } + } + } + r.mu.Unlock() + + // Avoid deadlock. + if r.cacher != nil { + if err := r.cacher.Close(); err != nil { + return err + } + } + return nil +} + +// CloseWeak closes the 'cache map' and evict all 'cache node' from cacher, but +// unlike Close it doesn't forcefully releases 'cache node'. +func (r *Cache) CloseWeak() error { + r.mu.Lock() + if !r.closed { + r.closed = true + } + r.mu.Unlock() + + // Avoid deadlock. + if r.cacher != nil { + r.cacher.EvictAll() + if err := r.cacher.Close(); err != nil { + return err + } + } + return nil +} + +// Node is a 'cache node'. +type Node struct { + r *Cache + + hash uint32 + ns, key uint64 + + mu sync.Mutex + size int + value Value + + ref int32 + onDel []func() + + CacheData unsafe.Pointer +} + +// NS returns this 'cache node' namespace. +func (n *Node) NS() uint64 { + return n.ns +} + +// Key returns this 'cache node' key. +func (n *Node) Key() uint64 { + return n.key +} + +// Size returns this 'cache node' size. +func (n *Node) Size() int { + return n.size +} + +// Value returns this 'cache node' value. +func (n *Node) Value() Value { + return n.value +} + +// Ref returns this 'cache node' ref counter. +func (n *Node) Ref() int32 { + return atomic.LoadInt32(&n.ref) +} + +// GetHandle returns an handle for this 'cache node'. +func (n *Node) GetHandle() *Handle { + if atomic.AddInt32(&n.ref, 1) <= 1 { + panic("BUG: Node.GetHandle on zero ref") + } + return &Handle{unsafe.Pointer(n)} +} + +func (n *Node) unref() { + if atomic.AddInt32(&n.ref, -1) == 0 { + n.r.delete(n) + } +} + +func (n *Node) unrefLocked() { + if atomic.AddInt32(&n.ref, -1) == 0 { + n.r.mu.RLock() + if !n.r.closed { + n.r.delete(n) + } + n.r.mu.RUnlock() + } +} + +// Handle is a 'cache handle' of a 'cache node'. +type Handle struct { + n unsafe.Pointer // *Node +} + +// Value returns the value of the 'cache node'. +func (h *Handle) Value() Value { + n := (*Node)(atomic.LoadPointer(&h.n)) + if n != nil { + return n.value + } + return nil +} + +// Release releases this 'cache handle'. +// It is safe to call release multiple times. +func (h *Handle) Release() { + nPtr := atomic.LoadPointer(&h.n) + if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) { + n := (*Node)(nPtr) + n.unrefLocked() + } +} + +func murmur32(ns, key uint64, seed uint32) uint32 { + const ( + m = uint32(0x5bd1e995) + r = 24 + ) + + k1 := uint32(ns >> 32) + k2 := uint32(ns) + k3 := uint32(key >> 32) + k4 := uint32(key) + + k1 *= m + k1 ^= k1 >> r + k1 *= m + + k2 *= m + k2 ^= k2 >> r + k2 *= m + + k3 *= m + k3 ^= k3 >> r + k3 *= m + + k4 *= m + k4 ^= k4 >> r + k4 *= m + + h := seed + + h *= m + h ^= k1 + h *= m + h ^= k2 + h *= m + h ^= k3 + h *= m + h ^= k4 + + h ^= h >> 13 + h *= m + h ^= h >> 15 + + return h +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go new file mode 100644 index 0000000..d9a84cd --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go @@ -0,0 +1,195 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. 
+// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package cache + +import ( + "sync" + "unsafe" +) + +type lruNode struct { + n *Node + h *Handle + ban bool + + next, prev *lruNode +} + +func (n *lruNode) insert(at *lruNode) { + x := at.next + at.next = n + n.prev = at + n.next = x + x.prev = n +} + +func (n *lruNode) remove() { + if n.prev != nil { + n.prev.next = n.next + n.next.prev = n.prev + n.prev = nil + n.next = nil + } else { + panic("BUG: removing removed node") + } +} + +type lru struct { + mu sync.Mutex + capacity int + used int + recent lruNode +} + +func (r *lru) reset() { + r.recent.next = &r.recent + r.recent.prev = &r.recent + r.used = 0 +} + +func (r *lru) Capacity() int { + r.mu.Lock() + defer r.mu.Unlock() + return r.capacity +} + +func (r *lru) SetCapacity(capacity int) { + var evicted []*lruNode + + r.mu.Lock() + r.capacity = capacity + for r.used > r.capacity { + rn := r.recent.prev + if rn == nil { + panic("BUG: invalid LRU used or capacity counter") + } + rn.remove() + rn.n.CacheData = nil + r.used -= rn.n.Size() + evicted = append(evicted, rn) + } + r.mu.Unlock() + + for _, rn := range evicted { + rn.h.Release() + } +} + +func (r *lru) Promote(n *Node) { + var evicted []*lruNode + + r.mu.Lock() + if n.CacheData == nil { + if n.Size() <= r.capacity { + rn := &lruNode{n: n, h: n.GetHandle()} + rn.insert(&r.recent) + n.CacheData = unsafe.Pointer(rn) + r.used += n.Size() + + for r.used > r.capacity { + rn := r.recent.prev + if rn == nil { + panic("BUG: invalid LRU used or capacity counter") + } + rn.remove() + rn.n.CacheData = nil + r.used -= rn.n.Size() + evicted = append(evicted, rn) + } + } + } else { + rn := (*lruNode)(n.CacheData) + if !rn.ban { + rn.remove() + rn.insert(&r.recent) + } + } + r.mu.Unlock() + + for _, rn := range evicted { + rn.h.Release() + } +} + +func (r *lru) Ban(n *Node) { + r.mu.Lock() + if n.CacheData == nil { + n.CacheData = unsafe.Pointer(&lruNode{n: n, ban: true}) + } else { + rn := (*lruNode)(n.CacheData) + if !rn.ban { + rn.remove() + rn.ban = true + r.used -= rn.n.Size() + r.mu.Unlock() + + rn.h.Release() + rn.h = nil + return + } + } + r.mu.Unlock() +} + +func (r *lru) Evict(n *Node) { + r.mu.Lock() + rn := (*lruNode)(n.CacheData) + if rn == nil || rn.ban { + r.mu.Unlock() + return + } + n.CacheData = nil + r.mu.Unlock() + + rn.h.Release() +} + +func (r *lru) EvictNS(ns uint64) { + var evicted []*lruNode + + r.mu.Lock() + for e := r.recent.prev; e != &r.recent; { + rn := e + e = e.prev + if rn.n.NS() == ns { + rn.remove() + rn.n.CacheData = nil + r.used -= rn.n.Size() + evicted = append(evicted, rn) + } + } + r.mu.Unlock() + + for _, rn := range evicted { + rn.h.Release() + } +} + +func (r *lru) EvictAll() { + r.mu.Lock() + back := r.recent.prev + for rn := back; rn != &r.recent; rn = rn.prev { + rn.n.CacheData = nil + } + r.reset() + r.mu.Unlock() + + for rn := back; rn != &r.recent; rn = rn.prev { + rn.h.Release() + } +} + +func (r *lru) Close() error { + return nil +} + +// NewLRU create a new LRU-cache. +func NewLRU(capacity int) Cacher { + r := &lru{capacity: capacity} + r.reset() + return r +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go new file mode 100644 index 0000000..448402b --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go @@ -0,0 +1,67 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. 
+// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "github.com/syndtr/goleveldb/leveldb/comparer" +) + +type iComparer struct { + ucmp comparer.Comparer +} + +func (icmp *iComparer) uName() string { + return icmp.ucmp.Name() +} + +func (icmp *iComparer) uCompare(a, b []byte) int { + return icmp.ucmp.Compare(a, b) +} + +func (icmp *iComparer) uSeparator(dst, a, b []byte) []byte { + return icmp.ucmp.Separator(dst, a, b) +} + +func (icmp *iComparer) uSuccessor(dst, b []byte) []byte { + return icmp.ucmp.Successor(dst, b) +} + +func (icmp *iComparer) Name() string { + return icmp.uName() +} + +func (icmp *iComparer) Compare(a, b []byte) int { + x := icmp.uCompare(internalKey(a).ukey(), internalKey(b).ukey()) + if x == 0 { + if m, n := internalKey(a).num(), internalKey(b).num(); m > n { + return -1 + } else if m < n { + return 1 + } + } + return x +} + +func (icmp *iComparer) Separator(dst, a, b []byte) []byte { + ua, ub := internalKey(a).ukey(), internalKey(b).ukey() + dst = icmp.uSeparator(dst, ua, ub) + if dst != nil && len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 { + // Append earliest possible number. + return append(dst, keyMaxNumBytes...) + } + return nil +} + +func (icmp *iComparer) Successor(dst, b []byte) []byte { + ub := internalKey(b).ukey() + dst = icmp.uSuccessor(dst, ub) + if dst != nil && len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 { + // Append earliest possible number. + return append(dst, keyMaxNumBytes...) + } + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go new file mode 100644 index 0000000..abf9fb6 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go @@ -0,0 +1,51 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package comparer + +import "bytes" + +type bytesComparer struct{} + +func (bytesComparer) Compare(a, b []byte) int { + return bytes.Compare(a, b) +} + +func (bytesComparer) Name() string { + return "leveldb.BytewiseComparator" +} + +func (bytesComparer) Separator(dst, a, b []byte) []byte { + i, n := 0, len(a) + if n > len(b) { + n = len(b) + } + for ; i < n && a[i] == b[i]; i++ { + } + if i >= n { + // Do not shorten if one string is a prefix of the other + } else if c := a[i]; c < 0xff && c+1 < b[i] { + dst = append(dst, a[:i+1]...) + dst[len(dst)-1]++ + return dst + } + return nil +} + +func (bytesComparer) Successor(dst, b []byte) []byte { + for i, c := range b { + if c != 0xff { + dst = append(dst, b[:i+1]...) + dst[len(dst)-1]++ + return dst + } + } + return nil +} + +// DefaultComparer are default implementation of the Comparer interface. +// It uses the natural ordering, consistent with bytes.Compare. +var DefaultComparer = bytesComparer{} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go new file mode 100644 index 0000000..2c522db --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go @@ -0,0 +1,57 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
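DefaultComparer's Separator and Successor exist to shorten the keys stored in index blocks. A small sketch of their observable behavior, with expected outputs derived from the code above:

```go
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/comparer"
)

func main() {
	c := comparer.DefaultComparer

	// Shortest key x with "abcd" <= x < "abzz": bump the first differing byte.
	fmt.Printf("%q\n", c.Separator(nil, []byte("abcd"), []byte("abzz"))) // "abd"

	// No shortening possible when a is a prefix of b: nil is returned.
	fmt.Printf("%q\n", c.Separator(nil, []byte("ab"), []byte("abcd"))) // ""

	// Shortest key x with x >= "abcd".
	fmt.Printf("%q\n", c.Successor(nil, []byte("abcd"))) // "b"
}
```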
+
+// Package comparer provides interfaces and implementations for ordering
+// sets of data.
+package comparer
+
+// BasicComparer is the interface that wraps the basic Compare method.
+type BasicComparer interface {
+ // Compare returns -1, 0, or +1 depending on whether a is 'less than',
+ // 'equal to' or 'greater than' b. The two arguments can only be 'equal'
+ // if their contents are exactly equal. Furthermore, the empty slice
+ // must be 'less than' any non-empty slice.
+ Compare(a, b []byte) int
+}
+
+// Comparer defines a total ordering over the space of []byte keys: a 'less
+// than' relationship.
+type Comparer interface {
+ BasicComparer
+
+ // Name returns the name of the comparer.
+ //
+ // The Level-DB on-disk format stores the comparer name, and opening a
+ // database with a different comparer from the one it was created with
+ // will result in an error.
+ //
+ // An implementation should switch to a new name whenever the comparer
+ // implementation changes in a way that would cause the relative ordering
+ // of any two keys to change.
+ //
+ // Names starting with "leveldb." are reserved and should not be used
+ // by any users of this package.
+ Name() string
+
+ // Below are advanced functions used to reduce the space requirements
+ // for internal data structures such as index blocks.
+
+ // Separator appends a sequence of bytes x to dst such that a <= x && x < b,
+ // where 'less than' is consistent with Compare. An implementation should
+ // return nil if x is equal to a.
+ //
+ // The contents of a and b must not be modified in any way; doing so may
+ // corrupt internal state.
+ Separator(dst, a, b []byte) []byte
+
+ // Successor appends a sequence of bytes x to dst such that x >= b, where
+ // 'less than' is consistent with Compare. An implementation should return
+ // nil if x is equal to b.
+ //
+ // The contents of b must not be modified in any way; doing so may
+ // corrupt internal state.
+ Successor(dst, b []byte) []byte
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db.go b/vendor/github.com/syndtr/goleveldb/leveldb/db.go
new file mode 100644
index 0000000..90fedf7
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db.go
@@ -0,0 +1,1179 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "container/list"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/journal"
+ "github.com/syndtr/goleveldb/leveldb/memdb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/table"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// DB is a LevelDB database.
+type DB struct {
+ // Need 64-bit alignment.
+ seq uint64
+
+ // Stats. Need 64-bit alignment.
+ cWriteDelay int64 // The cumulative duration of write delays
+ cWriteDelayN int32 // The cumulative number of write delays
+ inWritePaused int32 // Whether write operations are currently paused by compaction
+ aliveSnaps, aliveIters int32
+
+ // Session.
+ s *session
+
+ // MemDB.
+ memMu sync.RWMutex + memPool chan *memdb.DB + mem, frozenMem *memDB + journal *journal.Writer + journalWriter storage.Writer + journalFd storage.FileDesc + frozenJournalFd storage.FileDesc + frozenSeq uint64 + + // Snapshot. + snapsMu sync.Mutex + snapsList *list.List + + // Write. + batchPool sync.Pool + writeMergeC chan writeMerge + writeMergedC chan bool + writeLockC chan struct{} + writeAckC chan error + writeDelay time.Duration + writeDelayN int + tr *Transaction + + // Compaction. + compCommitLk sync.Mutex + tcompCmdC chan cCmd + tcompPauseC chan chan<- struct{} + mcompCmdC chan cCmd + compErrC chan error + compPerErrC chan error + compErrSetC chan error + compWriteLocking bool + compStats cStats + memdbMaxLevel int // For testing. + + // Close. + closeW sync.WaitGroup + closeC chan struct{} + closed uint32 + closer io.Closer +} + +func openDB(s *session) (*DB, error) { + s.log("db@open opening") + start := time.Now() + db := &DB{ + s: s, + // Initial sequence + seq: s.stSeqNum, + // MemDB + memPool: make(chan *memdb.DB, 1), + // Snapshot + snapsList: list.New(), + // Write + batchPool: sync.Pool{New: newBatch}, + writeMergeC: make(chan writeMerge), + writeMergedC: make(chan bool), + writeLockC: make(chan struct{}, 1), + writeAckC: make(chan error), + // Compaction + tcompCmdC: make(chan cCmd), + tcompPauseC: make(chan chan<- struct{}), + mcompCmdC: make(chan cCmd), + compErrC: make(chan error), + compPerErrC: make(chan error), + compErrSetC: make(chan error), + // Close + closeC: make(chan struct{}), + } + + // Read-only mode. + readOnly := s.o.GetReadOnly() + + if readOnly { + // Recover journals (read-only mode). + if err := db.recoverJournalRO(); err != nil { + return nil, err + } + } else { + // Recover journals. + if err := db.recoverJournal(); err != nil { + return nil, err + } + + // Remove any obsolete files. + if err := db.checkAndCleanFiles(); err != nil { + // Close journal. + if db.journal != nil { + db.journal.Close() + db.journalWriter.Close() + } + return nil, err + } + + } + + // Doesn't need to be included in the wait group. + go db.compactionError() + go db.mpoolDrain() + + if readOnly { + db.SetReadOnly() + } else { + db.closeW.Add(2) + go db.tCompaction() + go db.mCompaction() + // go db.jWriter() + } + + s.logf("db@open done T·%v", time.Since(start)) + + runtime.SetFinalizer(db, (*DB).Close) + return db, nil +} + +// Open opens or creates a DB for the given storage. +// The DB will be created if not exist, unless ErrorIfMissing is true. +// Also, if ErrorIfExist is true and the DB exist Open will returns +// os.ErrExist error. +// +// Open will return an error with type of ErrCorrupted if corruption +// detected in the DB. Use errors.IsCorrupted to test whether an error is +// due to corruption. Corrupted DB can be recovered with Recover function. +// +// The returned DB instance is safe for concurrent use. +// The DB must be closed after use, by calling Close method. +func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) { + s, err := newSession(stor, o) + if err != nil { + return + } + defer func() { + if err != nil { + s.close() + s.release() + } + }() + + err = s.recover() + if err != nil { + if !os.IsNotExist(err) || s.o.GetErrorIfMissing() || s.o.GetReadOnly() { + return + } + err = s.create() + if err != nil { + return + } + } else if s.o.GetErrorIfExist() { + err = os.ErrExist + return + } + + return openDB(s) +} + +// OpenFile opens or creates a DB for the given path. 
+// The DB will be created if not exist, unless ErrorIfMissing is true. +// Also, if ErrorIfExist is true and the DB exist OpenFile will returns +// os.ErrExist error. +// +// OpenFile uses standard file-system backed storage implementation as +// described in the leveldb/storage package. +// +// OpenFile will return an error with type of ErrCorrupted if corruption +// detected in the DB. Use errors.IsCorrupted to test whether an error is +// due to corruption. Corrupted DB can be recovered with Recover function. +// +// The returned DB instance is safe for concurrent use. +// The DB must be closed after use, by calling Close method. +func OpenFile(path string, o *opt.Options) (db *DB, err error) { + stor, err := storage.OpenFile(path, o.GetReadOnly()) + if err != nil { + return + } + db, err = Open(stor, o) + if err != nil { + stor.Close() + } else { + db.closer = stor + } + return +} + +// Recover recovers and opens a DB with missing or corrupted manifest files +// for the given storage. It will ignore any manifest files, valid or not. +// The DB must already exist or it will returns an error. +// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options. +// +// The returned DB instance is safe for concurrent use. +// The DB must be closed after use, by calling Close method. +func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) { + s, err := newSession(stor, o) + if err != nil { + return + } + defer func() { + if err != nil { + s.close() + s.release() + } + }() + + err = recoverTable(s, o) + if err != nil { + return + } + return openDB(s) +} + +// RecoverFile recovers and opens a DB with missing or corrupted manifest files +// for the given path. It will ignore any manifest files, valid or not. +// The DB must already exist or it will returns an error. +// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options. +// +// RecoverFile uses standard file-system backed storage implementation as described +// in the leveldb/storage package. +// +// The returned DB instance is safe for concurrent use. +// The DB must be closed after use, by calling Close method. +func RecoverFile(path string, o *opt.Options) (db *DB, err error) { + stor, err := storage.OpenFile(path, false) + if err != nil { + return + } + db, err = Recover(stor, o) + if err != nil { + stor.Close() + } else { + db.closer = stor + } + return +} + +func recoverTable(s *session, o *opt.Options) error { + o = dupOptions(o) + // Mask StrictReader, lets StrictRecovery doing its job. + o.Strict &= ^opt.StrictReader + + // Get all tables and sort it by file number. + fds, err := s.stor.List(storage.TypeTable) + if err != nil { + return err + } + sortFds(fds) + + var ( + maxSeq uint64 + recoveredKey, goodKey, corruptedKey, corruptedBlock, droppedTable int + + // We will drop corrupted table. + strict = o.GetStrict(opt.StrictRecovery) + noSync = o.GetNoSync() + + rec = &sessionRecord{} + bpool = util.NewBufferPool(o.GetBlockSize() + 5) + ) + buildTable := func(iter iterator.Iterator) (tmpFd storage.FileDesc, size int64, err error) { + tmpFd = s.newTemp() + writer, err := s.stor.Create(tmpFd) + if err != nil { + return + } + defer func() { + writer.Close() + if err != nil { + s.stor.Remove(tmpFd) + tmpFd = storage.FileDesc{} + } + }() + + // Copy entries. 
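+ // Only keys that pass internal-key validation are copied; corrupted
+ // entries are dropped, which is the point of the rebuild.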
+ tw := table.NewWriter(writer, o) + for iter.Next() { + key := iter.Key() + if validInternalKey(key) { + err = tw.Append(key, iter.Value()) + if err != nil { + return + } + } + } + err = iter.Error() + if err != nil && !errors.IsCorrupted(err) { + return + } + err = tw.Close() + if err != nil { + return + } + if !noSync { + err = writer.Sync() + if err != nil { + return + } + } + size = int64(tw.BytesLen()) + return + } + recoverTable := func(fd storage.FileDesc) error { + s.logf("table@recovery recovering @%d", fd.Num) + reader, err := s.stor.Open(fd) + if err != nil { + return err + } + var closed bool + defer func() { + if !closed { + reader.Close() + } + }() + + // Get file size. + size, err := reader.Seek(0, 2) + if err != nil { + return err + } + + var ( + tSeq uint64 + tgoodKey, tcorruptedKey, tcorruptedBlock int + imin, imax []byte + ) + tr, err := table.NewReader(reader, size, fd, nil, bpool, o) + if err != nil { + return err + } + iter := tr.NewIterator(nil, nil) + if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok { + itererr.SetErrorCallback(func(err error) { + if errors.IsCorrupted(err) { + s.logf("table@recovery block corruption @%d %q", fd.Num, err) + tcorruptedBlock++ + } + }) + } + + // Scan the table. + for iter.Next() { + key := iter.Key() + _, seq, _, kerr := parseInternalKey(key) + if kerr != nil { + tcorruptedKey++ + continue + } + tgoodKey++ + if seq > tSeq { + tSeq = seq + } + if imin == nil { + imin = append([]byte{}, key...) + } + imax = append(imax[:0], key...) + } + if err := iter.Error(); err != nil && !errors.IsCorrupted(err) { + iter.Release() + return err + } + iter.Release() + + goodKey += tgoodKey + corruptedKey += tcorruptedKey + corruptedBlock += tcorruptedBlock + + if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) { + droppedTable++ + s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) + return nil + } + + if tgoodKey > 0 { + if tcorruptedKey > 0 || tcorruptedBlock > 0 { + // Rebuild the table. + s.logf("table@recovery rebuilding @%d", fd.Num) + iter := tr.NewIterator(nil, nil) + tmpFd, newSize, err := buildTable(iter) + iter.Release() + if err != nil { + return err + } + closed = true + reader.Close() + if err := s.stor.Rename(tmpFd, fd); err != nil { + return err + } + size = newSize + } + if tSeq > maxSeq { + maxSeq = tSeq + } + recoveredKey += tgoodKey + // Add table to level 0. + rec.addTable(0, fd.Num, size, imin, imax) + s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) + } else { + droppedTable++ + s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", fd.Num, tcorruptedKey, tcorruptedBlock, size) + } + + return nil + } + + // Recover all tables. + if len(fds) > 0 { + s.logf("table@recovery F·%d", len(fds)) + + // Mark file number as used. + s.markFileNum(fds[len(fds)-1].Num) + + for _, fd := range fds { + if err := recoverTable(fd); err != nil { + return err + } + } + + s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(fds), recoveredKey, goodKey, corruptedKey, maxSeq) + } + + // Set sequence number. + rec.setSeqNum(maxSeq) + + // Create new manifest. + if err := s.create(); err != nil { + return err + } + + // Commit. + return s.commit(rec) +} + +func (db *DB) recoverJournal() error { + // Get all journals and sort it by file number. 
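+ // Only journals at or after the current journal number (plus the
+ // previous journal, if any) are replayed; batches are decoded into a
+ // memdb that is flushed to a level-0 table whenever it outgrows the
+ // write buffer.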
+ rawFds, err := db.s.stor.List(storage.TypeJournal) + if err != nil { + return err + } + sortFds(rawFds) + + // Journals that will be recovered. + var fds []storage.FileDesc + for _, fd := range rawFds { + if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum { + fds = append(fds, fd) + } + } + + var ( + ofd storage.FileDesc // Obsolete file. + rec = &sessionRecord{} + ) + + // Recover journals. + if len(fds) > 0 { + db.logf("journal@recovery F·%d", len(fds)) + + // Mark file number as used. + db.s.markFileNum(fds[len(fds)-1].Num) + + var ( + // Options. + strict = db.s.o.GetStrict(opt.StrictJournal) + checksum = db.s.o.GetStrict(opt.StrictJournalChecksum) + writeBuffer = db.s.o.GetWriteBuffer() + + jr *journal.Reader + mdb = memdb.New(db.s.icmp, writeBuffer) + buf = &util.Buffer{} + batchSeq uint64 + batchLen int + ) + + for _, fd := range fds { + db.logf("journal@recovery recovering @%d", fd.Num) + + fr, err := db.s.stor.Open(fd) + if err != nil { + return err + } + + // Create or reset journal reader instance. + if jr == nil { + jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum) + } else { + jr.Reset(fr, dropper{db.s, fd}, strict, checksum) + } + + // Flush memdb and remove obsolete journal file. + if !ofd.Zero() { + if mdb.Len() > 0 { + if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil { + fr.Close() + return err + } + } + + rec.setJournalNum(fd.Num) + rec.setSeqNum(db.seq) + if err := db.s.commit(rec); err != nil { + fr.Close() + return err + } + rec.resetAddedTables() + + db.s.stor.Remove(ofd) + ofd = storage.FileDesc{} + } + + // Replay journal to memdb. + mdb.Reset() + for { + r, err := jr.Next() + if err != nil { + if err == io.EOF { + break + } + + fr.Close() + return errors.SetFd(err, fd) + } + + buf.Reset() + if _, err := buf.ReadFrom(r); err != nil { + if err == io.ErrUnexpectedEOF { + // This is error returned due to corruption, with strict == false. + continue + } + + fr.Close() + return errors.SetFd(err, fd) + } + batchSeq, batchLen, err = decodeBatchToMem(buf.Bytes(), db.seq, mdb) + if err != nil { + if !strict && errors.IsCorrupted(err) { + db.s.logf("journal error: %v (skipped)", err) + // We won't apply sequence number as it might be corrupted. + continue + } + + fr.Close() + return errors.SetFd(err, fd) + } + + // Save sequence number. + db.seq = batchSeq + uint64(batchLen) + + // Flush it if large enough. + if mdb.Size() >= writeBuffer { + if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil { + fr.Close() + return err + } + + mdb.Reset() + } + } + + fr.Close() + ofd = fd + } + + // Flush the last memdb. + if mdb.Len() > 0 { + if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil { + return err + } + } + } + + // Create a new journal. + if _, err := db.newMem(0); err != nil { + return err + } + + // Commit. + rec.setJournalNum(db.journalFd.Num) + rec.setSeqNum(db.seq) + if err := db.s.commit(rec); err != nil { + // Close journal on error. + if db.journal != nil { + db.journal.Close() + db.journalWriter.Close() + } + return err + } + + // Remove the last obsolete journal file. + if !ofd.Zero() { + db.s.stor.Remove(ofd) + } + + return nil +} + +func (db *DB) recoverJournalRO() error { + // Get all journals and sort it by file number. + rawFds, err := db.s.stor.List(storage.TypeJournal) + if err != nil { + return err + } + sortFds(rawFds) + + // Journals that will be recovered. 
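+ // Same selection rule as recoverJournal above, but in read-only mode
+ // the replayed memdb is kept in memory and never flushed or committed.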
+ var fds []storage.FileDesc + for _, fd := range rawFds { + if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum { + fds = append(fds, fd) + } + } + + var ( + // Options. + strict = db.s.o.GetStrict(opt.StrictJournal) + checksum = db.s.o.GetStrict(opt.StrictJournalChecksum) + writeBuffer = db.s.o.GetWriteBuffer() + + mdb = memdb.New(db.s.icmp, writeBuffer) + ) + + // Recover journals. + if len(fds) > 0 { + db.logf("journal@recovery RO·Mode F·%d", len(fds)) + + var ( + jr *journal.Reader + buf = &util.Buffer{} + batchSeq uint64 + batchLen int + ) + + for _, fd := range fds { + db.logf("journal@recovery recovering @%d", fd.Num) + + fr, err := db.s.stor.Open(fd) + if err != nil { + return err + } + + // Create or reset journal reader instance. + if jr == nil { + jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum) + } else { + jr.Reset(fr, dropper{db.s, fd}, strict, checksum) + } + + // Replay journal to memdb. + for { + r, err := jr.Next() + if err != nil { + if err == io.EOF { + break + } + + fr.Close() + return errors.SetFd(err, fd) + } + + buf.Reset() + if _, err := buf.ReadFrom(r); err != nil { + if err == io.ErrUnexpectedEOF { + // This is error returned due to corruption, with strict == false. + continue + } + + fr.Close() + return errors.SetFd(err, fd) + } + batchSeq, batchLen, err = decodeBatchToMem(buf.Bytes(), db.seq, mdb) + if err != nil { + if !strict && errors.IsCorrupted(err) { + db.s.logf("journal error: %v (skipped)", err) + // We won't apply sequence number as it might be corrupted. + continue + } + + fr.Close() + return errors.SetFd(err, fd) + } + + // Save sequence number. + db.seq = batchSeq + uint64(batchLen) + } + + fr.Close() + } + } + + // Set memDB. + db.mem = &memDB{db: db, DB: mdb, ref: 1} + + return nil +} + +func memGet(mdb *memdb.DB, ikey internalKey, icmp *iComparer) (ok bool, mv []byte, err error) { + mk, mv, err := mdb.Find(ikey) + if err == nil { + ukey, _, kt, kerr := parseInternalKey(mk) + if kerr != nil { + // Shouldn't have had happen. + panic(kerr) + } + if icmp.uCompare(ukey, ikey.ukey()) == 0 { + if kt == keyTypeDel { + return true, nil, ErrNotFound + } + return true, mv, nil + + } + } else if err != ErrNotFound { + return true, nil, err + } + return +} + +func (db *DB) get(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) { + ikey := makeInternalKey(nil, key, seq, keyTypeSeek) + + if auxm != nil { + if ok, mv, me := memGet(auxm, ikey, db.s.icmp); ok { + return append([]byte{}, mv...), me + } + } + + em, fm := db.getMems() + for _, m := range [...]*memDB{em, fm} { + if m == nil { + continue + } + defer m.decref() + + if ok, mv, me := memGet(m.DB, ikey, db.s.icmp); ok { + return append([]byte{}, mv...), me + } + } + + v := db.s.version() + value, cSched, err := v.get(auxt, ikey, ro, false) + v.release() + if cSched { + // Trigger table compaction. 
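+ // compTrigger is non-blocking: if the compaction goroutine is busy,
+ // the command is simply dropped (see compTrigger in db_compaction.go).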
+ db.compTrigger(db.tcompCmdC)
+ }
+ return
+}
+
+func nilIfNotFound(err error) error {
+ if err == ErrNotFound {
+ return nil
+ }
+ return err
+}
+
+func (db *DB) has(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) {
+ ikey := makeInternalKey(nil, key, seq, keyTypeSeek)
+
+ if auxm != nil {
+ if ok, _, me := memGet(auxm, ikey, db.s.icmp); ok {
+ return me == nil, nilIfNotFound(me)
+ }
+ }
+
+ em, fm := db.getMems()
+ for _, m := range [...]*memDB{em, fm} {
+ if m == nil {
+ continue
+ }
+ defer m.decref()
+
+ if ok, _, me := memGet(m.DB, ikey, db.s.icmp); ok {
+ return me == nil, nilIfNotFound(me)
+ }
+ }
+
+ v := db.s.version()
+ _, cSched, err := v.get(auxt, ikey, ro, true)
+ v.release()
+ if cSched {
+ // Trigger table compaction.
+ db.compTrigger(db.tcompCmdC)
+ }
+ if err == nil {
+ ret = true
+ } else if err == ErrNotFound {
+ err = nil
+ }
+ return
+}
+
+// Get gets the value for the given key. It returns ErrNotFound if the
+// DB does not contain the key.
+//
+// The returned slice is its own copy; it is safe to modify its contents.
+// It is also safe to modify the contents of the argument after Get returns.
+func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+ err = db.ok()
+ if err != nil {
+ return
+ }
+
+ se := db.acquireSnapshot()
+ defer db.releaseSnapshot(se)
+ return db.get(nil, nil, key, se.seq, ro)
+}
+
+// Has returns true if the DB contains the given key.
+//
+// It is safe to modify the contents of the argument after Has returns.
+func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
+ err = db.ok()
+ if err != nil {
+ return
+ }
+
+ se := db.acquireSnapshot()
+ defer db.releaseSnapshot(se)
+ return db.has(nil, nil, key, se.seq, ro)
+}
+
+// NewIterator returns an iterator for the latest snapshot of the
+// underlying DB.
+// The returned iterator is not safe for concurrent use, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with modifying its
+// underlying DB. The resultant key/value pairs are guaranteed to be
+// consistent.
+//
+// Slice allows slicing the iterator to only contain keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// WARNING: Any slice returned by the iterator (e.g. a slice returned by
+// calling Iterator.Key() or Iterator.Value()) should not be modified
+// unless noted otherwise.
+//
+// The iterator must be released after use, by calling Release method.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ if err := db.ok(); err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
+
+ se := db.acquireSnapshot()
+ defer db.releaseSnapshot(se)
+ // The iterator holds a reference to the current 'version'; since versions
+ // are immutable, the snapshot can be released once the iterator is created.
+ return db.newIterator(nil, nil, se.seq, slice, ro)
+}
+
+// GetSnapshot returns the latest snapshot of the underlying DB. A snapshot
+// is a frozen view of the DB state at a particular point in time, and its
+// content is guaranteed to be consistent.
+//
+// The snapshot must be released after use, by calling Release method.
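The Get/Has/NewIterator contracts documented above are easiest to see in a short usage sketch; the path and keys are arbitrary and error handling is abbreviated:

```go
package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/demo.db", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Put([]byte("k"), []byte("v"), nil); err != nil {
		log.Fatal(err)
	}

	// Get returns a private copy, and ErrNotFound for missing keys.
	v, err := db.Get([]byte("k"), nil)
	if err == leveldb.ErrNotFound {
		log.Fatal("missing key")
	} else if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", v)

	// Iterators must be released, and Error checked afterwards.
	iter := db.NewIterator(nil, nil)
	for iter.Next() {
		fmt.Printf("%s=%s\n", iter.Key(), iter.Value())
	}
	iter.Release()
	if err := iter.Error(); err != nil {
		log.Fatal(err)
	}
}
```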
+func (db *DB) GetSnapshot() (*Snapshot, error) { + if err := db.ok(); err != nil { + return nil, err + } + + return db.newSnapshot(), nil +} + +// GetProperty returns value of the given property name. +// +// Property names: +// leveldb.num-files-at-level{n} +// Returns the number of files at level 'n'. +// leveldb.stats +// Returns statistics of the underlying DB. +// leveldb.iostats +// Returns statistics of effective disk read and write. +// leveldb.writedelay +// Returns cumulative write delay caused by compaction. +// leveldb.sstables +// Returns sstables list for each level. +// leveldb.blockpool +// Returns block pool stats. +// leveldb.cachedblock +// Returns size of cached block. +// leveldb.openedtables +// Returns number of opened tables. +// leveldb.alivesnaps +// Returns number of alive snapshots. +// leveldb.aliveiters +// Returns number of alive iterators. +func (db *DB) GetProperty(name string) (value string, err error) { + err = db.ok() + if err != nil { + return + } + + const prefix = "leveldb." + if !strings.HasPrefix(name, prefix) { + return "", ErrNotFound + } + p := name[len(prefix):] + + v := db.s.version() + defer v.release() + + numFilesPrefix := "num-files-at-level" + switch { + case strings.HasPrefix(p, numFilesPrefix): + var level uint + var rest string + n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest) + if n != 1 { + err = ErrNotFound + } else { + value = fmt.Sprint(v.tLen(int(level))) + } + case p == "stats": + value = "Compactions\n" + + " Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" + + "-------+------------+---------------+---------------+---------------+---------------\n" + for level, tables := range v.levels { + duration, read, write := db.compStats.getStat(level) + if len(tables) == 0 && duration == 0 { + continue + } + value += fmt.Sprintf(" %3d | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n", + level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(), + float64(read)/1048576.0, float64(write)/1048576.0) + } + case p == "iostats": + value = fmt.Sprintf("Read(MB):%.5f Write(MB):%.5f", + float64(db.s.stor.reads())/1048576.0, + float64(db.s.stor.writes())/1048576.0) + case p == "writedelay": + writeDelayN, writeDelay := atomic.LoadInt32(&db.cWriteDelayN), time.Duration(atomic.LoadInt64(&db.cWriteDelay)) + paused := atomic.LoadInt32(&db.inWritePaused) == 1 + value = fmt.Sprintf("DelayN:%d Delay:%s Paused:%t", writeDelayN, writeDelay, paused) + case p == "sstables": + for level, tables := range v.levels { + value += fmt.Sprintf("--- level %d ---\n", level) + for _, t := range tables { + value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.fd.Num, t.size, t.imin, t.imax) + } + } + case p == "blockpool": + value = fmt.Sprintf("%v", db.s.tops.bpool) + case p == "cachedblock": + if db.s.tops.bcache != nil { + value = fmt.Sprintf("%d", db.s.tops.bcache.Size()) + } else { + value = "" + } + case p == "openedtables": + value = fmt.Sprintf("%d", db.s.tops.cache.Size()) + case p == "alivesnaps": + value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps)) + case p == "aliveiters": + value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters)) + default: + err = ErrNotFound + } + + return +} + +// DBStats is database statistics. 
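GetProperty above is string-keyed; a sketch querying a few of the documented property names (the path is arbitrary):

```go
package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/demo.db", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Property names must carry the "leveldb." prefix documented above.
	for _, p := range []string{"leveldb.stats", "leveldb.num-files-at-level0", "leveldb.aliveiters"} {
		v, err := db.GetProperty(p)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s:\n%s\n", p, v)
	}
}
```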
+type DBStats struct { + WriteDelayCount int32 + WriteDelayDuration time.Duration + WritePaused bool + + AliveSnapshots int32 + AliveIterators int32 + + IOWrite uint64 + IORead uint64 + + BlockCacheSize int + OpenedTablesCount int + + LevelSizes []int64 + LevelTablesCounts []int + LevelRead []int64 + LevelWrite []int64 + LevelDurations []time.Duration +} + +// Stats populates s with database statistics. +func (db *DB) Stats(s *DBStats) error { + err := db.ok() + if err != nil { + return err + } + + s.IORead = db.s.stor.reads() + s.IOWrite = db.s.stor.writes() + s.WriteDelayCount = atomic.LoadInt32(&db.cWriteDelayN) + s.WriteDelayDuration = time.Duration(atomic.LoadInt64(&db.cWriteDelay)) + s.WritePaused = atomic.LoadInt32(&db.inWritePaused) == 1 + + s.OpenedTablesCount = db.s.tops.cache.Size() + if db.s.tops.bcache != nil { + s.BlockCacheSize = db.s.tops.bcache.Size() + } else { + s.BlockCacheSize = 0 + } + + s.AliveIterators = atomic.LoadInt32(&db.aliveIters) + s.AliveSnapshots = atomic.LoadInt32(&db.aliveSnaps) + + s.LevelDurations = s.LevelDurations[:0] + s.LevelRead = s.LevelRead[:0] + s.LevelWrite = s.LevelWrite[:0] + s.LevelSizes = s.LevelSizes[:0] + s.LevelTablesCounts = s.LevelTablesCounts[:0] + + v := db.s.version() + defer v.release() + + for level, tables := range v.levels { + duration, read, write := db.compStats.getStat(level) + if len(tables) == 0 && duration == 0 { + continue + } + s.LevelDurations = append(s.LevelDurations, duration) + s.LevelRead = append(s.LevelRead, read) + s.LevelWrite = append(s.LevelWrite, write) + s.LevelSizes = append(s.LevelSizes, tables.size()) + s.LevelTablesCounts = append(s.LevelTablesCounts, len(tables)) + } + + return nil +} + +// SizeOf calculates approximate sizes of the given key ranges. +// The length of the returned sizes are equal with the length of the given +// ranges. The returned sizes measure storage space usage, so if the user +// data compresses by a factor of ten, the returned sizes will be one-tenth +// the size of the corresponding user data size. +// The results may not include the sizes of recently written data. +func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) { + if err := db.ok(); err != nil { + return nil, err + } + + v := db.s.version() + defer v.release() + + sizes := make(Sizes, 0, len(ranges)) + for _, r := range ranges { + imin := makeInternalKey(nil, r.Start, keyMaxSeq, keyTypeSeek) + imax := makeInternalKey(nil, r.Limit, keyMaxSeq, keyTypeSeek) + start, err := v.offsetOf(imin) + if err != nil { + return nil, err + } + limit, err := v.offsetOf(imax) + if err != nil { + return nil, err + } + var size int64 + if limit >= start { + size = limit - start + } + sizes = append(sizes, size) + } + + return sizes, nil +} + +// Close closes the DB. This will also releases any outstanding snapshot, +// abort any in-flight compaction and discard open transaction. +// +// It is not safe to close a DB until all outstanding iterators are released. +// It is valid to call Close multiple times. Other methods should not be +// called after the DB has been closed. +func (db *DB) Close() error { + if !db.setClosed() { + return ErrClosed + } + + start := time.Now() + db.log("db@close closing") + + // Clear the finalizer. + runtime.SetFinalizer(db, nil) + + // Get compaction error. + var err error + select { + case err = <-db.compErrC: + if err == ErrReadOnly { + err = nil + } + default: + } + + // Signal all goroutines. + close(db.closeC) + + // Discard open transaction. 
+ if db.tr != nil { + db.tr.Discard() + } + + // Acquire writer lock. + db.writeLockC <- struct{}{} + + // Wait for all gorotines to exit. + db.closeW.Wait() + + // Closes journal. + if db.journal != nil { + db.journal.Close() + db.journalWriter.Close() + db.journal = nil + db.journalWriter = nil + } + + if db.writeDelayN > 0 { + db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay) + } + + // Close session. + db.s.close() + db.logf("db@close done T·%v", time.Since(start)) + db.s.release() + + if db.closer != nil { + if err1 := db.closer.Close(); err == nil { + err = err1 + } + db.closer = nil + } + + // Clear memdbs. + db.clearMems() + + return err +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go new file mode 100644 index 0000000..0c1b9a5 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go @@ -0,0 +1,854 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "sync" + "time" + + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" +) + +var ( + errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting") +) + +type cStat struct { + duration time.Duration + read int64 + write int64 +} + +func (p *cStat) add(n *cStatStaging) { + p.duration += n.duration + p.read += n.read + p.write += n.write +} + +func (p *cStat) get() (duration time.Duration, read, write int64) { + return p.duration, p.read, p.write +} + +type cStatStaging struct { + start time.Time + duration time.Duration + on bool + read int64 + write int64 +} + +func (p *cStatStaging) startTimer() { + if !p.on { + p.start = time.Now() + p.on = true + } +} + +func (p *cStatStaging) stopTimer() { + if p.on { + p.duration += time.Since(p.start) + p.on = false + } +} + +type cStats struct { + lk sync.Mutex + stats []cStat +} + +func (p *cStats) addStat(level int, n *cStatStaging) { + p.lk.Lock() + if level >= len(p.stats) { + newStats := make([]cStat, level+1) + copy(newStats, p.stats) + p.stats = newStats + } + p.stats[level].add(n) + p.lk.Unlock() +} + +func (p *cStats) getStat(level int) (duration time.Duration, read, write int64) { + p.lk.Lock() + defer p.lk.Unlock() + if level < len(p.stats) { + return p.stats[level].get() + } + return +} + +func (db *DB) compactionError() { + var err error +noerr: + // No error. + for { + select { + case err = <-db.compErrSetC: + switch { + case err == nil: + case err == ErrReadOnly, errors.IsCorrupted(err): + goto hasperr + default: + goto haserr + } + case <-db.closeC: + return + } + } +haserr: + // Transient error. + for { + select { + case db.compErrC <- err: + case err = <-db.compErrSetC: + switch { + case err == nil: + goto noerr + case err == ErrReadOnly, errors.IsCorrupted(err): + goto hasperr + default: + } + case <-db.closeC: + return + } + } +hasperr: + // Persistent error. + for { + select { + case db.compErrC <- err: + case db.compPerErrC <- err: + case db.writeLockC <- struct{}{}: + // Hold write lock, so that write won't pass-through. + db.compWriteLocking = true + case <-db.closeC: + if db.compWriteLocking { + // We should release the lock or Close will hang. 
+ <-db.writeLockC + } + return + } + } +} + +type compactionTransactCounter int + +func (cnt *compactionTransactCounter) incr() { + *cnt++ +} + +type compactionTransactInterface interface { + run(cnt *compactionTransactCounter) error + revert() error +} + +func (db *DB) compactionTransact(name string, t compactionTransactInterface) { + defer func() { + if x := recover(); x != nil { + if x == errCompactionTransactExiting { + if err := t.revert(); err != nil { + db.logf("%s revert error %q", name, err) + } + } + panic(x) + } + }() + + const ( + backoffMin = 1 * time.Second + backoffMax = 8 * time.Second + backoffMul = 2 * time.Second + ) + var ( + backoff = backoffMin + backoffT = time.NewTimer(backoff) + lastCnt = compactionTransactCounter(0) + + disableBackoff = db.s.o.GetDisableCompactionBackoff() + ) + for n := 0; ; n++ { + // Check whether the DB is closed. + if db.isClosed() { + db.logf("%s exiting", name) + db.compactionExitTransact() + } else if n > 0 { + db.logf("%s retrying N·%d", name, n) + } + + // Execute. + cnt := compactionTransactCounter(0) + err := t.run(&cnt) + if err != nil { + db.logf("%s error I·%d %q", name, cnt, err) + } + + // Set compaction error status. + select { + case db.compErrSetC <- err: + case perr := <-db.compPerErrC: + if err != nil { + db.logf("%s exiting (persistent error %q)", name, perr) + db.compactionExitTransact() + } + case <-db.closeC: + db.logf("%s exiting", name) + db.compactionExitTransact() + } + if err == nil { + return + } + if errors.IsCorrupted(err) { + db.logf("%s exiting (corruption detected)", name) + db.compactionExitTransact() + } + + if !disableBackoff { + // Reset backoff duration if counter is advancing. + if cnt > lastCnt { + backoff = backoffMin + lastCnt = cnt + } + + // Backoff. + backoffT.Reset(backoff) + if backoff < backoffMax { + backoff *= backoffMul + if backoff > backoffMax { + backoff = backoffMax + } + } + select { + case <-backoffT.C: + case <-db.closeC: + db.logf("%s exiting", name) + db.compactionExitTransact() + } + } + } +} + +type compactionTransactFunc struct { + runFunc func(cnt *compactionTransactCounter) error + revertFunc func() error +} + +func (t *compactionTransactFunc) run(cnt *compactionTransactCounter) error { + return t.runFunc(cnt) +} + +func (t *compactionTransactFunc) revert() error { + if t.revertFunc != nil { + return t.revertFunc() + } + return nil +} + +func (db *DB) compactionTransactFunc(name string, run func(cnt *compactionTransactCounter) error, revert func() error) { + db.compactionTransact(name, &compactionTransactFunc{run, revert}) +} + +func (db *DB) compactionExitTransact() { + panic(errCompactionTransactExiting) +} + +func (db *DB) compactionCommit(name string, rec *sessionRecord) { + db.compCommitLk.Lock() + defer db.compCommitLk.Unlock() // Defer is necessary. + db.compactionTransactFunc(name+"@commit", func(cnt *compactionTransactCounter) error { + return db.s.commit(rec) + }, nil) +} + +func (db *DB) memCompaction() { + mdb := db.getFrozenMem() + if mdb == nil { + return + } + defer mdb.decref() + + db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size())) + + // Don't compact empty memdb. + if mdb.Len() == 0 { + db.logf("memdb@flush skipping") + // drop frozen memdb + db.dropFrozenMem() + return + } + + // Pause table compaction. 
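+ // Handshake: hand a resume channel to the table-compaction goroutine,
+ // which parks in pauseCompaction until the memdb flush below completes.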
+ resumeC := make(chan struct{}) + select { + case db.tcompPauseC <- (chan<- struct{})(resumeC): + case <-db.compPerErrC: + close(resumeC) + resumeC = nil + case <-db.closeC: + db.compactionExitTransact() + } + + var ( + rec = &sessionRecord{} + stats = &cStatStaging{} + flushLevel int + ) + + // Generate tables. + db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) { + stats.startTimer() + flushLevel, err = db.s.flushMemdb(rec, mdb.DB, db.memdbMaxLevel) + stats.stopTimer() + return + }, func() error { + for _, r := range rec.addedTables { + db.logf("memdb@flush revert @%d", r.num) + if err := db.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: r.num}); err != nil { + return err + } + } + return nil + }) + + rec.setJournalNum(db.journalFd.Num) + rec.setSeqNum(db.frozenSeq) + + // Commit. + stats.startTimer() + db.compactionCommit("memdb", rec) + stats.stopTimer() + + db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration) + + for _, r := range rec.addedTables { + stats.write += r.size + } + db.compStats.addStat(flushLevel, stats) + + // Drop frozen memdb. + db.dropFrozenMem() + + // Resume table compaction. + if resumeC != nil { + select { + case <-resumeC: + close(resumeC) + case <-db.closeC: + db.compactionExitTransact() + } + } + + // Trigger table compaction. + db.compTrigger(db.tcompCmdC) +} + +type tableCompactionBuilder struct { + db *DB + s *session + c *compaction + rec *sessionRecord + stat0, stat1 *cStatStaging + + snapHasLastUkey bool + snapLastUkey []byte + snapLastSeq uint64 + snapIter int + snapKerrCnt int + snapDropCnt int + + kerrCnt int + dropCnt int + + minSeq uint64 + strict bool + tableSize int + + tw *tWriter +} + +func (b *tableCompactionBuilder) appendKV(key, value []byte) error { + // Create new table if not already. + if b.tw == nil { + // Check for pause event. + if b.db != nil { + select { + case ch := <-b.db.tcompPauseC: + b.db.pauseCompaction(ch) + case <-b.db.closeC: + b.db.compactionExitTransact() + default: + } + } + + // Create new table. + var err error + b.tw, err = b.s.tops.create() + if err != nil { + return err + } + } + + // Write key/value into table. + return b.tw.append(key, value) +} + +func (b *tableCompactionBuilder) needFlush() bool { + return b.tw.tw.BytesLen() >= b.tableSize +} + +func (b *tableCompactionBuilder) flush() error { + t, err := b.tw.finish() + if err != nil { + return err + } + b.rec.addTableFile(b.c.sourceLevel+1, t) + b.stat1.write += t.size + b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.sourceLevel+1, t.fd.Num, b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax) + b.tw = nil + return nil +} + +func (b *tableCompactionBuilder) cleanup() { + if b.tw != nil { + b.tw.drop() + b.tw = nil + } +} + +func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error { + snapResumed := b.snapIter > 0 + hasLastUkey := b.snapHasLastUkey // The key might has zero length, so this is necessary. + lastUkey := append([]byte{}, b.snapLastUkey...) + lastSeq := b.snapLastSeq + b.kerrCnt = b.snapKerrCnt + b.dropCnt = b.snapDropCnt + // Restore compaction state. + b.c.restore() + + defer b.cleanup() + + b.stat1.startTimer() + defer b.stat1.stopTimer() + + iter := b.c.newIterator() + defer iter.Release() + for i := 0; iter.Next(); i++ { + // Incr transact counter. + cnt.incr() + + // Skip until last state. 
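+ // On retry after a transient error the iterator is replayed from the
+ // beginning, so fast-forward to the position saved by the last save().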
+ if i < b.snapIter { + continue + } + + resumed := false + if snapResumed { + resumed = true + snapResumed = false + } + + ikey := iter.Key() + ukey, seq, kt, kerr := parseInternalKey(ikey) + + if kerr == nil { + shouldStop := !resumed && b.c.shouldStopBefore(ikey) + + if !hasLastUkey || b.s.icmp.uCompare(lastUkey, ukey) != 0 { + // First occurrence of this user key. + + // Only rotate tables if ukey doesn't hop across. + if b.tw != nil && (shouldStop || b.needFlush()) { + if err := b.flush(); err != nil { + return err + } + + // Creates snapshot of the state. + b.c.save() + b.snapHasLastUkey = hasLastUkey + b.snapLastUkey = append(b.snapLastUkey[:0], lastUkey...) + b.snapLastSeq = lastSeq + b.snapIter = i + b.snapKerrCnt = b.kerrCnt + b.snapDropCnt = b.dropCnt + } + + hasLastUkey = true + lastUkey = append(lastUkey[:0], ukey...) + lastSeq = keyMaxSeq + } + + switch { + case lastSeq <= b.minSeq: + // Dropped because newer entry for same user key exist + fallthrough // (A) + case kt == keyTypeDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey): + // For this user key: + // (1) there is no data in higher levels + // (2) data in lower levels will have larger seq numbers + // (3) data in layers that are being compacted here and have + // smaller seq numbers will be dropped in the next + // few iterations of this loop (by rule (A) above). + // Therefore this deletion marker is obsolete and can be dropped. + lastSeq = seq + b.dropCnt++ + continue + default: + lastSeq = seq + } + } else { + if b.strict { + return kerr + } + + // Don't drop corrupted keys. + hasLastUkey = false + lastUkey = lastUkey[:0] + lastSeq = keyMaxSeq + b.kerrCnt++ + } + + if err := b.appendKV(ikey, iter.Value()); err != nil { + return err + } + } + + if err := iter.Error(); err != nil { + return err + } + + // Finish last table. + if b.tw != nil && !b.tw.empty() { + return b.flush() + } + return nil +} + +func (b *tableCompactionBuilder) revert() error { + for _, at := range b.rec.addedTables { + b.s.logf("table@build revert @%d", at.num) + if err := b.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: at.num}); err != nil { + return err + } + } + return nil +} + +func (db *DB) tableCompaction(c *compaction, noTrivial bool) { + defer c.release() + + rec := &sessionRecord{} + rec.addCompPtr(c.sourceLevel, c.imax) + + if !noTrivial && c.trivial() { + t := c.levels[0][0] + db.logf("table@move L%d@%d -> L%d", c.sourceLevel, t.fd.Num, c.sourceLevel+1) + rec.delTable(c.sourceLevel, t.fd.Num) + rec.addTableFile(c.sourceLevel+1, t) + db.compactionCommit("table-move", rec) + return + } + + var stats [2]cStatStaging + for i, tables := range c.levels { + for _, t := range tables { + stats[i].read += t.size + // Insert deleted tables into record + rec.delTable(c.sourceLevel+i, t.fd.Num) + } + } + sourceSize := int(stats[0].read + stats[1].read) + minSeq := db.minSeq() + db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.sourceLevel, len(c.levels[0]), c.sourceLevel+1, len(c.levels[1]), shortenb(sourceSize), minSeq) + + b := &tableCompactionBuilder{ + db: db, + s: db.s, + c: c, + rec: rec, + stat1: &stats[1], + minSeq: minSeq, + strict: db.s.o.GetStrict(opt.StrictCompaction), + tableSize: db.s.o.GetCompactionTableSize(c.sourceLevel + 1), + } + db.compactionTransact("table@build", b) + + // Commit. 
+ stats[1].startTimer() + db.compactionCommit("table", rec) + stats[1].stopTimer() + + resultSize := int(stats[1].write) + db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration) + + // Save compaction stats + for i := range stats { + db.compStats.addStat(c.sourceLevel+1, &stats[i]) + } +} + +func (db *DB) tableRangeCompaction(level int, umin, umax []byte) error { + db.logf("table@compaction range L%d %q:%q", level, umin, umax) + if level >= 0 { + if c := db.s.getCompactionRange(level, umin, umax, true); c != nil { + db.tableCompaction(c, true) + } + } else { + // Retry until nothing to compact. + for { + compacted := false + + // Scan for maximum level with overlapped tables. + v := db.s.version() + m := 1 + for i := m; i < len(v.levels); i++ { + tables := v.levels[i] + if tables.overlaps(db.s.icmp, umin, umax, false) { + m = i + } + } + v.release() + + for level := 0; level < m; level++ { + if c := db.s.getCompactionRange(level, umin, umax, false); c != nil { + db.tableCompaction(c, true) + compacted = true + } + } + + if !compacted { + break + } + } + } + + return nil +} + +func (db *DB) tableAutoCompaction() { + if c := db.s.pickCompaction(); c != nil { + db.tableCompaction(c, false) + } +} + +func (db *DB) tableNeedCompaction() bool { + v := db.s.version() + defer v.release() + return v.needCompaction() +} + +// resumeWrite returns an indicator whether we should resume write operation if enough level0 files are compacted. +func (db *DB) resumeWrite() bool { + v := db.s.version() + defer v.release() + if v.tLen(0) < db.s.o.GetWriteL0PauseTrigger() { + return true + } + return false +} + +func (db *DB) pauseCompaction(ch chan<- struct{}) { + select { + case ch <- struct{}{}: + case <-db.closeC: + db.compactionExitTransact() + } +} + +type cCmd interface { + ack(err error) +} + +type cAuto struct { + // Note for table compaction, an non-empty ackC represents it's a compaction waiting command. + ackC chan<- error +} + +func (r cAuto) ack(err error) { + if r.ackC != nil { + defer func() { + recover() + }() + r.ackC <- err + } +} + +type cRange struct { + level int + min, max []byte + ackC chan<- error +} + +func (r cRange) ack(err error) { + if r.ackC != nil { + defer func() { + recover() + }() + r.ackC <- err + } +} + +// This will trigger auto compaction but will not wait for it. +func (db *DB) compTrigger(compC chan<- cCmd) { + select { + case compC <- cAuto{}: + default: + } +} + +// This will trigger auto compaction and/or wait for all compaction to be done. +func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) { + ch := make(chan error) + defer close(ch) + // Send cmd. + select { + case compC <- cAuto{ch}: + case err = <-db.compErrC: + return + case <-db.closeC: + return ErrClosed + } + // Wait cmd. + select { + case err = <-ch: + case err = <-db.compErrC: + case <-db.closeC: + return ErrClosed + } + return err +} + +// Send range compaction request. +func (db *DB) compTriggerRange(compC chan<- cCmd, level int, min, max []byte) (err error) { + ch := make(chan error) + defer close(ch) + // Send cmd. + select { + case compC <- cRange{level, min, max, ch}: + case err := <-db.compErrC: + return err + case <-db.closeC: + return ErrClosed + } + // Wait cmd. 
+ select { + case err = <-ch: + case err = <-db.compErrC: + case <-db.closeC: + return ErrClosed + } + return err +} + +func (db *DB) mCompaction() { + var x cCmd + + defer func() { + if x := recover(); x != nil { + if x != errCompactionTransactExiting { + panic(x) + } + } + if x != nil { + x.ack(ErrClosed) + } + db.closeW.Done() + }() + + for { + select { + case x = <-db.mcompCmdC: + switch x.(type) { + case cAuto: + db.memCompaction() + x.ack(nil) + x = nil + default: + panic("leveldb: unknown command") + } + case <-db.closeC: + return + } + } +} + +func (db *DB) tCompaction() { + var ( + x cCmd + waitQ []cCmd + ) + + defer func() { + if x := recover(); x != nil { + if x != errCompactionTransactExiting { + panic(x) + } + } + for i := range waitQ { + waitQ[i].ack(ErrClosed) + waitQ[i] = nil + } + if x != nil { + x.ack(ErrClosed) + } + db.closeW.Done() + }() + + for { + if db.tableNeedCompaction() { + select { + case x = <-db.tcompCmdC: + case ch := <-db.tcompPauseC: + db.pauseCompaction(ch) + continue + case <-db.closeC: + return + default: + } + // Resume write operation as soon as possible. + if len(waitQ) > 0 && db.resumeWrite() { + for i := range waitQ { + waitQ[i].ack(nil) + waitQ[i] = nil + } + waitQ = waitQ[:0] + } + } else { + for i := range waitQ { + waitQ[i].ack(nil) + waitQ[i] = nil + } + waitQ = waitQ[:0] + select { + case x = <-db.tcompCmdC: + case ch := <-db.tcompPauseC: + db.pauseCompaction(ch) + continue + case <-db.closeC: + return + } + } + if x != nil { + switch cmd := x.(type) { + case cAuto: + if cmd.ackC != nil { + // Check the write pause state before caching it. + if db.resumeWrite() { + x.ack(nil) + } else { + waitQ = append(waitQ, x) + } + } + case cRange: + x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max)) + default: + panic("leveldb: unknown command") + } + x = nil + } + db.tableAutoCompaction() + } +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go new file mode 100644 index 0000000..03c24cd --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go @@ -0,0 +1,360 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
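The compaction command plumbing above (compTriggerRange, mCompaction, tCompaction) is reached from user code through the public CompactRange method, which this patch vendors in db_write.go. A sketch assuming that wrapper, with an arbitrary path:

```go
package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/demo.db", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// A zero Range means "everything": compact the whole key space.
	// Internally this funnels into compTriggerRange/tableRangeCompaction.
	if err := db.CompactRange(util.Range{}); err != nil {
		log.Fatal(err)
	}
}
```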
+ +package leveldb + +import ( + "errors" + "math/rand" + "runtime" + "sync" + "sync/atomic" + + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" +) + +var ( + errInvalidInternalKey = errors.New("leveldb: Iterator: invalid internal key") +) + +type memdbReleaser struct { + once sync.Once + m *memDB +} + +func (mr *memdbReleaser) Release() { + mr.once.Do(func() { + mr.m.decref() + }) +} + +func (db *DB) newRawIterator(auxm *memDB, auxt tFiles, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader) + em, fm := db.getMems() + v := db.s.version() + + tableIts := v.getIterators(slice, ro) + n := len(tableIts) + len(auxt) + 3 + its := make([]iterator.Iterator, 0, n) + + if auxm != nil { + ami := auxm.NewIterator(slice) + ami.SetReleaser(&memdbReleaser{m: auxm}) + its = append(its, ami) + } + for _, t := range auxt { + its = append(its, v.s.tops.newIterator(t, slice, ro)) + } + + emi := em.NewIterator(slice) + emi.SetReleaser(&memdbReleaser{m: em}) + its = append(its, emi) + if fm != nil { + fmi := fm.NewIterator(slice) + fmi.SetReleaser(&memdbReleaser{m: fm}) + its = append(its, fmi) + } + its = append(its, tableIts...) + mi := iterator.NewMergedIterator(its, db.s.icmp, strict) + mi.SetReleaser(&versionReleaser{v: v}) + return mi +} + +func (db *DB) newIterator(auxm *memDB, auxt tFiles, seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter { + var islice *util.Range + if slice != nil { + islice = &util.Range{} + if slice.Start != nil { + islice.Start = makeInternalKey(nil, slice.Start, keyMaxSeq, keyTypeSeek) + } + if slice.Limit != nil { + islice.Limit = makeInternalKey(nil, slice.Limit, keyMaxSeq, keyTypeSeek) + } + } + rawIter := db.newRawIterator(auxm, auxt, islice, ro) + iter := &dbIter{ + db: db, + icmp: db.s.icmp, + iter: rawIter, + seq: seq, + strict: opt.GetStrict(db.s.o.Options, ro, opt.StrictReader), + key: make([]byte, 0), + value: make([]byte, 0), + } + atomic.AddInt32(&db.aliveIters, 1) + runtime.SetFinalizer(iter, (*dbIter).Release) + return iter +} + +func (db *DB) iterSamplingRate() int { + return rand.Intn(2 * db.s.o.GetIteratorSamplingRate()) +} + +type dir int + +const ( + dirReleased dir = iota - 1 + dirSOI + dirEOI + dirBackward + dirForward +) + +// dbIter represent an interator states over a database session. 
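dbIter below layers snapshot filtering and direction state (dirForward/dirBackward) on top of the merged iterator. A sketch of how that shows up through the public API (path and keys are arbitrary):

```go
package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/demo.db", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	for _, k := range []string{"k1", "k2", "k3"} {
		if err := db.Put([]byte(k), []byte("v"), nil); err != nil {
			log.Fatal(err)
		}
	}

	iter := db.NewIterator(nil, nil)
	defer iter.Release()

	// Seek positions at the first key >= "k2" (dirForward); Prev then flips
	// the iterator to dirBackward and steps to the preceding user key.
	if iter.Seek([]byte("k2")) {
		fmt.Printf("at %s\n", iter.Key()) // at k2
	}
	if iter.Prev() {
		fmt.Printf("back to %s\n", iter.Key()) // back to k1
	}
}
```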
+type dbIter struct { + db *DB + icmp *iComparer + iter iterator.Iterator + seq uint64 + strict bool + + smaplingGap int + dir dir + key []byte + value []byte + err error + releaser util.Releaser +} + +func (i *dbIter) sampleSeek() { + ikey := i.iter.Key() + i.smaplingGap -= len(ikey) + len(i.iter.Value()) + for i.smaplingGap < 0 { + i.smaplingGap += i.db.iterSamplingRate() + i.db.sampleSeek(ikey) + } +} + +func (i *dbIter) setErr(err error) { + i.err = err + i.key = nil + i.value = nil +} + +func (i *dbIter) iterErr() { + if err := i.iter.Error(); err != nil { + i.setErr(err) + } +} + +func (i *dbIter) Valid() bool { + return i.err == nil && i.dir > dirEOI +} + +func (i *dbIter) First() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.iter.First() { + i.dir = dirSOI + return i.next() + } + i.dir = dirEOI + i.iterErr() + return false +} + +func (i *dbIter) Last() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.iter.Last() { + return i.prev() + } + i.dir = dirSOI + i.iterErr() + return false +} + +func (i *dbIter) Seek(key []byte) bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + ikey := makeInternalKey(nil, key, i.seq, keyTypeSeek) + if i.iter.Seek(ikey) { + i.dir = dirSOI + return i.next() + } + i.dir = dirEOI + i.iterErr() + return false +} + +func (i *dbIter) next() bool { + for { + if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil { + i.sampleSeek() + if seq <= i.seq { + switch kt { + case keyTypeDel: + // Skip deleted key. + i.key = append(i.key[:0], ukey...) + i.dir = dirForward + case keyTypeVal: + if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 { + i.key = append(i.key[:0], ukey...) + i.value = append(i.value[:0], i.iter.Value()...) + i.dir = dirForward + return true + } + } + } + } else if i.strict { + i.setErr(kerr) + break + } + if !i.iter.Next() { + i.dir = dirEOI + i.iterErr() + break + } + } + return false +} + +func (i *dbIter) Next() bool { + if i.dir == dirEOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if !i.iter.Next() || (i.dir == dirBackward && !i.iter.Next()) { + i.dir = dirEOI + i.iterErr() + return false + } + return i.next() +} + +func (i *dbIter) prev() bool { + i.dir = dirBackward + del := true + if i.iter.Valid() { + for { + if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil { + i.sampleSeek() + if seq <= i.seq { + if !del && i.icmp.uCompare(ukey, i.key) < 0 { + return true + } + del = (kt == keyTypeDel) + if !del { + i.key = append(i.key[:0], ukey...) + i.value = append(i.value[:0], i.iter.Value()...) 
+ } + } + } else if i.strict { + i.setErr(kerr) + return false + } + if !i.iter.Prev() { + break + } + } + } + if del { + i.dir = dirSOI + i.iterErr() + return false + } + return true +} + +func (i *dbIter) Prev() bool { + if i.dir == dirSOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + switch i.dir { + case dirEOI: + return i.Last() + case dirForward: + for i.iter.Prev() { + if ukey, _, _, kerr := parseInternalKey(i.iter.Key()); kerr == nil { + i.sampleSeek() + if i.icmp.uCompare(ukey, i.key) < 0 { + goto cont + } + } else if i.strict { + i.setErr(kerr) + return false + } + } + i.dir = dirSOI + i.iterErr() + return false + } + +cont: + return i.prev() +} + +func (i *dbIter) Key() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.key +} + +func (i *dbIter) Value() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.value +} + +func (i *dbIter) Release() { + if i.dir != dirReleased { + // Clear the finalizer. + runtime.SetFinalizer(i, nil) + + if i.releaser != nil { + i.releaser.Release() + i.releaser = nil + } + + i.dir = dirReleased + i.key = nil + i.value = nil + i.iter.Release() + i.iter = nil + atomic.AddInt32(&i.db.aliveIters, -1) + i.db = nil + } +} + +func (i *dbIter) SetReleaser(releaser util.Releaser) { + if i.dir == dirReleased { + panic(util.ErrReleased) + } + if i.releaser != nil && releaser != nil { + panic(util.ErrHasReleaser) + } + i.releaser = releaser +} + +func (i *dbIter) Error() error { + return i.err +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go new file mode 100644 index 0000000..c2ad70c --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go @@ -0,0 +1,187 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "container/list" + "fmt" + "runtime" + "sync" + "sync/atomic" + + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" +) + +type snapshotElement struct { + seq uint64 + ref int + e *list.Element +} + +// Acquires a snapshot, based on latest sequence. +func (db *DB) acquireSnapshot() *snapshotElement { + db.snapsMu.Lock() + defer db.snapsMu.Unlock() + + seq := db.getSeq() + + if e := db.snapsList.Back(); e != nil { + se := e.Value.(*snapshotElement) + if se.seq == seq { + se.ref++ + return se + } else if seq < se.seq { + panic("leveldb: sequence number is not increasing") + } + } + se := &snapshotElement{seq: seq, ref: 1} + se.e = db.snapsList.PushBack(se) + return se +} + +// Releases given snapshot element. +func (db *DB) releaseSnapshot(se *snapshotElement) { + db.snapsMu.Lock() + defer db.snapsMu.Unlock() + + se.ref-- + if se.ref == 0 { + db.snapsList.Remove(se.e) + se.e = nil + } else if se.ref < 0 { + panic("leveldb: Snapshot: negative element reference") + } +} + +// Gets minimum sequence that not being snapshotted. +func (db *DB) minSeq() uint64 { + db.snapsMu.Lock() + defer db.snapsMu.Unlock() + + if e := db.snapsList.Front(); e != nil { + return e.Value.(*snapshotElement).seq + } + + return db.getSeq() +} + +// Snapshot is a DB snapshot. +type Snapshot struct { + db *DB + elem *snapshotElement + mu sync.RWMutex + released bool +} + +// Creates new snapshot object. 
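+//
+// A minimal usage sketch through the public API (db.GetSnapshot wraps this),
+// assuming an already opened db:
+//
+//	snap, err := db.GetSnapshot()
+//	...
+//	defer snap.Release()
+//	data, err := snap.Get([]byte("key"), nil)
+//	...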
+func (db *DB) newSnapshot() *Snapshot {
+	snap := &Snapshot{
+		db:   db,
+		elem: db.acquireSnapshot(),
+	}
+	atomic.AddInt32(&db.aliveSnaps, 1)
+	runtime.SetFinalizer(snap, (*Snapshot).Release)
+	return snap
+}
+
+func (snap *Snapshot) String() string {
+	return fmt.Sprintf("leveldb.Snapshot{%d}", snap.elem.seq)
+}
+
+// Get gets the value for the given key. It returns ErrNotFound if
+// the DB does not contain the key.
+//
+// The caller should not modify the contents of the returned slice, but
+// it is safe to modify the contents of the argument after Get returns.
+func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+	err = snap.db.ok()
+	if err != nil {
+		return
+	}
+	snap.mu.RLock()
+	defer snap.mu.RUnlock()
+	if snap.released {
+		err = ErrSnapshotReleased
+		return
+	}
+	return snap.db.get(nil, nil, key, snap.elem.seq, ro)
+}
+
+// Has returns true if the DB does contain the given key.
+//
+// It is safe to modify the contents of the argument after Has returns.
+func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
+	err = snap.db.ok()
+	if err != nil {
+		return
+	}
+	snap.mu.RLock()
+	defer snap.mu.RUnlock()
+	if snap.released {
+		err = ErrSnapshotReleased
+		return
+	}
+	return snap.db.has(nil, nil, key, snap.elem.seq, ro)
+}
+
+// NewIterator returns an iterator for the snapshot of the underlying DB.
+// The returned iterator is not safe for concurrent use, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with modifying its
+// underlying DB. The resultant key/value pairs are guaranteed to be
+// consistent.
+//
+// Slice allows slicing the iterator to contain only keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// WARNING: The contents of any slice returned by the iterator (e.g. the slice
+// returned by the Iterator.Key() or Iterator.Value() methods) should not be
+// modified unless noted otherwise.
+//
+// The iterator must be released after use, by calling the Release method.
+// Releasing the snapshot doesn't release the iterator too; the
+// iterator is still valid until released.
+//
+// Also read the Iterator documentation of the leveldb/iterator package.
+func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+	if err := snap.db.ok(); err != nil {
+		return iterator.NewEmptyIterator(err)
+	}
+	snap.mu.Lock()
+	defer snap.mu.Unlock()
+	if snap.released {
+		return iterator.NewEmptyIterator(ErrSnapshotReleased)
+	}
+	// Since the iterator already holds a version ref, it doesn't need to
+	// hold a snapshot ref.
+	return snap.db.newIterator(nil, nil, snap.elem.seq, slice, ro)
+}
+
+// Release releases the snapshot. This will not release any returned
+// iterators; the iterators are still valid until released or the
+// underlying DB is closed.
+//
+// Other methods should not be called after the snapshot has been released.
+func (snap *Snapshot) Release() {
+	snap.mu.Lock()
+	defer snap.mu.Unlock()
+
+	if !snap.released {
+		// Clear the finalizer.
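+		// (the finalizer set in newSnapshot exists only to reclaim leaked
+		// snapshots; an explicit Release must not let it run a second time)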
+ runtime.SetFinalizer(snap, nil) + + snap.released = true + snap.db.releaseSnapshot(snap.elem) + atomic.AddInt32(&snap.db.aliveSnaps, -1) + snap.db = nil + snap.elem = nil + } +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go new file mode 100644 index 0000000..65e1c54 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go @@ -0,0 +1,239 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "errors" + "sync/atomic" + "time" + + "github.com/syndtr/goleveldb/leveldb/journal" + "github.com/syndtr/goleveldb/leveldb/memdb" + "github.com/syndtr/goleveldb/leveldb/storage" +) + +var ( + errHasFrozenMem = errors.New("has frozen mem") +) + +type memDB struct { + db *DB + *memdb.DB + ref int32 +} + +func (m *memDB) getref() int32 { + return atomic.LoadInt32(&m.ref) +} + +func (m *memDB) incref() { + atomic.AddInt32(&m.ref, 1) +} + +func (m *memDB) decref() { + if ref := atomic.AddInt32(&m.ref, -1); ref == 0 { + // Only put back memdb with std capacity. + if m.Capacity() == m.db.s.o.GetWriteBuffer() { + m.Reset() + m.db.mpoolPut(m.DB) + } + m.db = nil + m.DB = nil + } else if ref < 0 { + panic("negative memdb ref") + } +} + +// Get latest sequence number. +func (db *DB) getSeq() uint64 { + return atomic.LoadUint64(&db.seq) +} + +// Atomically adds delta to seq. +func (db *DB) addSeq(delta uint64) { + atomic.AddUint64(&db.seq, delta) +} + +func (db *DB) setSeq(seq uint64) { + atomic.StoreUint64(&db.seq, seq) +} + +func (db *DB) sampleSeek(ikey internalKey) { + v := db.s.version() + if v.sampleSeek(ikey) { + // Trigger table compaction. + db.compTrigger(db.tcompCmdC) + } + v.release() +} + +func (db *DB) mpoolPut(mem *memdb.DB) { + if !db.isClosed() { + select { + case db.memPool <- mem: + default: + } + } +} + +func (db *DB) mpoolGet(n int) *memDB { + var mdb *memdb.DB + select { + case mdb = <-db.memPool: + default: + } + if mdb == nil || mdb.Capacity() < n { + mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n)) + } + return &memDB{ + db: db, + DB: mdb, + } +} + +func (db *DB) mpoolDrain() { + ticker := time.NewTicker(30 * time.Second) + for { + select { + case <-ticker.C: + select { + case <-db.memPool: + default: + } + case <-db.closeC: + ticker.Stop() + // Make sure the pool is drained. + select { + case <-db.memPool: + case <-time.After(time.Second): + } + close(db.memPool) + return + } + } +} + +// Create new memdb and froze the old one; need external synchronization. +// newMem only called synchronously by the writer. +func (db *DB) newMem(n int) (mem *memDB, err error) { + fd := storage.FileDesc{Type: storage.TypeJournal, Num: db.s.allocFileNum()} + w, err := db.s.stor.Create(fd) + if err != nil { + db.s.reuseFileNum(fd.Num) + return + } + + db.memMu.Lock() + defer db.memMu.Unlock() + + if db.frozenMem != nil { + return nil, errHasFrozenMem + } + + if db.journal == nil { + db.journal = journal.NewWriter(w) + } else { + db.journal.Reset(w) + db.journalWriter.Close() + db.frozenJournalFd = db.journalFd + } + db.journalWriter = w + db.journalFd = fd + db.frozenMem = db.mem + mem = db.mpoolGet(n) + mem.incref() // for self + mem.incref() // for caller + db.mem = mem + // The seq only incremented by the writer. And whoever called newMem + // should hold write lock, so no need additional synchronization here. 
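+	// frozenSeq records the sequence number at the moment the memdb was
+	// frozen; entries up to it belong to the frozen journal.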
+ db.frozenSeq = db.seq + return +} + +// Get all memdbs. +func (db *DB) getMems() (e, f *memDB) { + db.memMu.RLock() + defer db.memMu.RUnlock() + if db.mem != nil { + db.mem.incref() + } else if !db.isClosed() { + panic("nil effective mem") + } + if db.frozenMem != nil { + db.frozenMem.incref() + } + return db.mem, db.frozenMem +} + +// Get effective memdb. +func (db *DB) getEffectiveMem() *memDB { + db.memMu.RLock() + defer db.memMu.RUnlock() + if db.mem != nil { + db.mem.incref() + } else if !db.isClosed() { + panic("nil effective mem") + } + return db.mem +} + +// Check whether we has frozen memdb. +func (db *DB) hasFrozenMem() bool { + db.memMu.RLock() + defer db.memMu.RUnlock() + return db.frozenMem != nil +} + +// Get frozen memdb. +func (db *DB) getFrozenMem() *memDB { + db.memMu.RLock() + defer db.memMu.RUnlock() + if db.frozenMem != nil { + db.frozenMem.incref() + } + return db.frozenMem +} + +// Drop frozen memdb; assume that frozen memdb isn't nil. +func (db *DB) dropFrozenMem() { + db.memMu.Lock() + if err := db.s.stor.Remove(db.frozenJournalFd); err != nil { + db.logf("journal@remove removing @%d %q", db.frozenJournalFd.Num, err) + } else { + db.logf("journal@remove removed @%d", db.frozenJournalFd.Num) + } + db.frozenJournalFd = storage.FileDesc{} + db.frozenMem.decref() + db.frozenMem = nil + db.memMu.Unlock() +} + +// Clear mems ptr; used by DB.Close(). +func (db *DB) clearMems() { + db.memMu.Lock() + db.mem = nil + db.frozenMem = nil + db.memMu.Unlock() +} + +// Set closed flag; return true if not already closed. +func (db *DB) setClosed() bool { + return atomic.CompareAndSwapUint32(&db.closed, 0, 1) +} + +// Check whether DB was closed. +func (db *DB) isClosed() bool { + return atomic.LoadUint32(&db.closed) != 0 +} + +// Check read ok status. +func (db *DB) ok() error { + if db.isClosed() { + return ErrClosed + } + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go new file mode 100644 index 0000000..1a00001 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go @@ -0,0 +1,329 @@ +// Copyright (c) 2016, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "errors" + "sync" + "time" + + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" +) + +var errTransactionDone = errors.New("leveldb: transaction already closed") + +// Transaction is the transaction handle. +type Transaction struct { + db *DB + lk sync.RWMutex + seq uint64 + mem *memDB + tables tFiles + ikScratch []byte + rec sessionRecord + stats cStatStaging + closed bool +} + +// Get gets the value for the given key. It returns ErrNotFound if the +// DB does not contains the key. +// +// The returned slice is its own copy, it is safe to modify the contents +// of the returned slice. +// It is safe to modify the contents of the argument after Get returns. +func (tr *Transaction) Get(key []byte, ro *opt.ReadOptions) ([]byte, error) { + tr.lk.RLock() + defer tr.lk.RUnlock() + if tr.closed { + return nil, errTransactionDone + } + return tr.db.get(tr.mem.DB, tr.tables, key, tr.seq, ro) +} + +// Has returns true if the DB does contains the given key. +// +// It is safe to modify the contents of the argument after Has returns. 
+func (tr *Transaction) Has(key []byte, ro *opt.ReadOptions) (bool, error) {
+	tr.lk.RLock()
+	defer tr.lk.RUnlock()
+	if tr.closed {
+		return false, errTransactionDone
+	}
+	return tr.db.has(tr.mem.DB, tr.tables, key, tr.seq, ro)
+}
+
+// NewIterator returns an iterator for the latest snapshot of the transaction.
+// The returned iterator is not safe for concurrent use, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with writes to the
+// transaction. The resultant key/value pairs are guaranteed to be consistent.
+//
+// Slice allows slicing the iterator to contain only keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// WARNING: The contents of any slice returned by the iterator (e.g. the slice
+// returned by the Iterator.Key() or Iterator.Value() methods) should not be
+// modified unless noted otherwise.
+//
+// The iterator must be released after use, by calling the Release method.
+//
+// Also read the Iterator documentation of the leveldb/iterator package.
+func (tr *Transaction) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+	tr.lk.RLock()
+	defer tr.lk.RUnlock()
+	if tr.closed {
+		return iterator.NewEmptyIterator(errTransactionDone)
+	}
+	tr.mem.incref()
+	return tr.db.newIterator(tr.mem, tr.tables, tr.seq, slice, ro)
+}
+
+func (tr *Transaction) flush() error {
+	// Flush memdb.
+	if tr.mem.Len() != 0 {
+		tr.stats.startTimer()
+		iter := tr.mem.NewIterator(nil)
+		t, n, err := tr.db.s.tops.createFrom(iter)
+		iter.Release()
+		tr.stats.stopTimer()
+		if err != nil {
+			return err
+		}
+		if tr.mem.getref() == 1 {
+			tr.mem.Reset()
+		} else {
+			tr.mem.decref()
+			tr.mem = tr.db.mpoolGet(0)
+			tr.mem.incref()
+		}
+		tr.tables = append(tr.tables, t)
+		tr.rec.addTableFile(0, t)
+		tr.stats.write += t.size
+		tr.db.logf("transaction@flush created L0@%d N·%d S·%s %q:%q", t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax)
+	}
+	return nil
+}
+
+func (tr *Transaction) put(kt keyType, key, value []byte) error {
+	tr.ikScratch = makeInternalKey(tr.ikScratch, key, tr.seq+1, kt)
+	if tr.mem.Free() < len(tr.ikScratch)+len(value) {
+		if err := tr.flush(); err != nil {
+			return err
+		}
+	}
+	if err := tr.mem.Put(tr.ikScratch, value); err != nil {
+		return err
+	}
+	tr.seq++
+	return nil
+}
+
+// Put sets the value for the given key. It overwrites any previous value
+// for that key; a DB is not a multi-map.
+// Please note that the transaction is not compacted until committed, so if you
+// write the same key 10 times, then all 10 entries remain in the transaction.
+//
+// It is safe to modify the contents of the arguments after Put returns.
+func (tr *Transaction) Put(key, value []byte, wo *opt.WriteOptions) error {
+	tr.lk.Lock()
+	defer tr.lk.Unlock()
+	if tr.closed {
+		return errTransactionDone
+	}
+	return tr.put(keyTypeVal, key, value)
+}
+
+// Delete deletes the value for the given key.
+// Please note that the transaction is not compacted until committed, so if you
+// write the same key 10 times, then all 10 entries remain in the transaction.
+//
+// It is safe to modify the contents of the arguments after Delete returns.
+func (tr *Transaction) Delete(key []byte, wo *opt.WriteOptions) error {
+	tr.lk.Lock()
+	defer tr.lk.Unlock()
+	if tr.closed {
+		return errTransactionDone
+	}
+	return tr.put(keyTypeDel, key, nil)
+}
+
+// Write applies the given batch to the transaction.
The batch will be applied +// sequentially. +// Please note that the transaction is not compacted until committed, so if you +// writes 10 same keys, then those 10 same keys are in the transaction. +// +// It is safe to modify the contents of the arguments after Write returns. +func (tr *Transaction) Write(b *Batch, wo *opt.WriteOptions) error { + if b == nil || b.Len() == 0 { + return nil + } + + tr.lk.Lock() + defer tr.lk.Unlock() + if tr.closed { + return errTransactionDone + } + return b.replayInternal(func(i int, kt keyType, k, v []byte) error { + return tr.put(kt, k, v) + }) +} + +func (tr *Transaction) setDone() { + tr.closed = true + tr.db.tr = nil + tr.mem.decref() + <-tr.db.writeLockC +} + +// Commit commits the transaction. If error is not nil, then the transaction is +// not committed, it can then either be retried or discarded. +// +// Other methods should not be called after transaction has been committed. +func (tr *Transaction) Commit() error { + if err := tr.db.ok(); err != nil { + return err + } + + tr.lk.Lock() + defer tr.lk.Unlock() + if tr.closed { + return errTransactionDone + } + if err := tr.flush(); err != nil { + // Return error, lets user decide either to retry or discard + // transaction. + return err + } + if len(tr.tables) != 0 { + // Committing transaction. + tr.rec.setSeqNum(tr.seq) + tr.db.compCommitLk.Lock() + tr.stats.startTimer() + var cerr error + for retry := 0; retry < 3; retry++ { + cerr = tr.db.s.commit(&tr.rec) + if cerr != nil { + tr.db.logf("transaction@commit error R·%d %q", retry, cerr) + select { + case <-time.After(time.Second): + case <-tr.db.closeC: + tr.db.logf("transaction@commit exiting") + tr.db.compCommitLk.Unlock() + return cerr + } + } else { + // Success. Set db.seq. + tr.db.setSeq(tr.seq) + break + } + } + tr.stats.stopTimer() + if cerr != nil { + // Return error, lets user decide either to retry or discard + // transaction. + return cerr + } + + // Update compaction stats. This is safe as long as we hold compCommitLk. + tr.db.compStats.addStat(0, &tr.stats) + + // Trigger table auto-compaction. + tr.db.compTrigger(tr.db.tcompCmdC) + tr.db.compCommitLk.Unlock() + + // Additionally, wait compaction when certain threshold reached. + // Ignore error, returns error only if transaction can't be committed. + tr.db.waitCompaction() + } + // Only mark as done if transaction committed successfully. + tr.setDone() + return nil +} + +func (tr *Transaction) discard() { + // Discard transaction. + for _, t := range tr.tables { + tr.db.logf("transaction@discard @%d", t.fd.Num) + if err1 := tr.db.s.stor.Remove(t.fd); err1 == nil { + tr.db.s.reuseFileNum(t.fd.Num) + } + } +} + +// Discard discards the transaction. +// +// Other methods should not be called after transaction has been discarded. +func (tr *Transaction) Discard() { + tr.lk.Lock() + if !tr.closed { + tr.discard() + tr.setDone() + } + tr.lk.Unlock() +} + +func (db *DB) waitCompaction() error { + if db.s.tLen(0) >= db.s.o.GetWriteL0PauseTrigger() { + return db.compTriggerWait(db.tcompCmdC) + } + return nil +} + +// OpenTransaction opens an atomic DB transaction. Only one transaction can be +// opened at a time. Subsequent call to Write and OpenTransaction will be blocked +// until in-flight transaction is committed or discarded. +// The returned transaction handle is safe for concurrent use. +// +// Transaction is expensive and can overwhelm compaction, especially if +// transaction size is small. Use with caution. 
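+//
+// A minimal usage sketch, assuming an already opened db:
+//
+//	tr, err := db.OpenTransaction()
+//	...
+//	err = tr.Put([]byte("key"), []byte("value"), nil)
+//	...
+//	err = tr.Commit() // or tr.Discard() on failure
+//	...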
+//
+// The transaction must be closed once done, either by committing or discarding
+// the transaction.
+// Closing the DB will discard any open transaction.
+func (db *DB) OpenTransaction() (*Transaction, error) {
+	if err := db.ok(); err != nil {
+		return nil, err
+	}
+
+	// The write happens synchronously.
+	select {
+	case db.writeLockC <- struct{}{}:
+	case err := <-db.compPerErrC:
+		return nil, err
+	case <-db.closeC:
+		return nil, ErrClosed
+	}
+
+	if db.tr != nil {
+		panic("leveldb: has open transaction")
+	}
+
+	// Flush current memdb.
+	if db.mem != nil && db.mem.Len() != 0 {
+		if _, err := db.rotateMem(0, true); err != nil {
+			return nil, err
+		}
+	}
+
+	// Wait for compaction when a certain threshold is reached.
+	if err := db.waitCompaction(); err != nil {
+		return nil, err
+	}
+
+	tr := &Transaction{
+		db:  db,
+		seq: db.seq,
+		mem: db.mpoolGet(0),
+	}
+	tr.mem.incref()
+	db.tr = tr
+	return tr, nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go
new file mode 100644
index 0000000..3f06548
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go
@@ -0,0 +1,102 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"github.com/syndtr/goleveldb/leveldb/errors"
+	"github.com/syndtr/goleveldb/leveldb/iterator"
+	"github.com/syndtr/goleveldb/leveldb/opt"
+	"github.com/syndtr/goleveldb/leveldb/storage"
+	"github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Reader is the interface that wraps basic Get and NewIterator methods.
+// This interface is implemented by both DB and Snapshot.
+type Reader interface {
+	Get(key []byte, ro *opt.ReadOptions) (value []byte, err error)
+	NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator
+}
+
+// Sizes is a list of sizes.
+type Sizes []int64
+
+// Sum returns the sum of the sizes.
+func (sizes Sizes) Sum() int64 {
+	var sum int64
+	for _, size := range sizes {
+		sum += size
+	}
+	return sum
+}
+
+// Logging.
+func (db *DB) log(v ...interface{})                 { db.s.log(v...) }
+func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) }
+
+// Check and clean files.
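+// checkAndCleanFiles removes files that are no longer in use, and returns a
+// corruption error if a table file referenced by the current version is
+// missing.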
+func (db *DB) checkAndCleanFiles() error { + v := db.s.version() + defer v.release() + + tmap := make(map[int64]bool) + for _, tables := range v.levels { + for _, t := range tables { + tmap[t.fd.Num] = false + } + } + + fds, err := db.s.stor.List(storage.TypeAll) + if err != nil { + return err + } + + var nt int + var rem []storage.FileDesc + for _, fd := range fds { + keep := true + switch fd.Type { + case storage.TypeManifest: + keep = fd.Num >= db.s.manifestFd.Num + case storage.TypeJournal: + if !db.frozenJournalFd.Zero() { + keep = fd.Num >= db.frozenJournalFd.Num + } else { + keep = fd.Num >= db.journalFd.Num + } + case storage.TypeTable: + _, keep = tmap[fd.Num] + if keep { + tmap[fd.Num] = true + nt++ + } + } + + if !keep { + rem = append(rem, fd) + } + } + + if nt != len(tmap) { + var mfds []storage.FileDesc + for num, present := range tmap { + if !present { + mfds = append(mfds, storage.FileDesc{Type: storage.TypeTable, Num: num}) + db.logf("db@janitor table missing @%d", num) + } + } + return errors.NewErrCorrupted(storage.FileDesc{}, &errors.ErrMissingFiles{Fds: mfds}) + } + + db.logf("db@janitor F·%d G·%d", len(fds), len(rem)) + for _, fd := range rem { + db.logf("db@janitor removing %s-%d", fd.Type, fd.Num) + if err := db.s.stor.Remove(fd); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go new file mode 100644 index 0000000..db0c1be --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go @@ -0,0 +1,464 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "sync/atomic" + "time" + + "github.com/syndtr/goleveldb/leveldb/memdb" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" +) + +func (db *DB) writeJournal(batches []*Batch, seq uint64, sync bool) error { + wr, err := db.journal.Next() + if err != nil { + return err + } + if err := writeBatchesWithHeader(wr, batches, seq); err != nil { + return err + } + if err := db.journal.Flush(); err != nil { + return err + } + if sync { + return db.journalWriter.Sync() + } + return nil +} + +func (db *DB) rotateMem(n int, wait bool) (mem *memDB, err error) { + retryLimit := 3 +retry: + // Wait for pending memdb compaction. + err = db.compTriggerWait(db.mcompCmdC) + if err != nil { + return + } + retryLimit-- + + // Create new memdb and journal. + mem, err = db.newMem(n) + if err != nil { + if err == errHasFrozenMem { + if retryLimit <= 0 { + panic("BUG: still has frozen memdb") + } + goto retry + } + return + } + + // Schedule memdb compaction. + if wait { + err = db.compTriggerWait(db.mcompCmdC) + } else { + db.compTrigger(db.mcompCmdC) + } + return +} + +func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) { + delayed := false + slowdownTrigger := db.s.o.GetWriteL0SlowdownTrigger() + pauseTrigger := db.s.o.GetWriteL0PauseTrigger() + flush := func() (retry bool) { + mdb = db.getEffectiveMem() + if mdb == nil { + err = ErrClosed + return false + } + defer func() { + if retry { + mdb.decref() + mdb = nil + } + }() + tLen := db.s.tLen(0) + mdbFree = mdb.Free() + switch { + case tLen >= slowdownTrigger && !delayed: + delayed = true + time.Sleep(time.Millisecond) + case mdbFree >= n: + return false + case tLen >= pauseTrigger: + delayed = true + // Set the write paused flag explicitly. 
+ atomic.StoreInt32(&db.inWritePaused, 1) + err = db.compTriggerWait(db.tcompCmdC) + // Unset the write paused flag. + atomic.StoreInt32(&db.inWritePaused, 0) + if err != nil { + return false + } + default: + // Allow memdb to grow if it has no entry. + if mdb.Len() == 0 { + mdbFree = n + } else { + mdb.decref() + mdb, err = db.rotateMem(n, false) + if err == nil { + mdbFree = mdb.Free() + } else { + mdbFree = 0 + } + } + return false + } + return true + } + start := time.Now() + for flush() { + } + if delayed { + db.writeDelay += time.Since(start) + db.writeDelayN++ + } else if db.writeDelayN > 0 { + db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay) + atomic.AddInt32(&db.cWriteDelayN, int32(db.writeDelayN)) + atomic.AddInt64(&db.cWriteDelay, int64(db.writeDelay)) + db.writeDelay = 0 + db.writeDelayN = 0 + } + return +} + +type writeMerge struct { + sync bool + batch *Batch + keyType keyType + key, value []byte +} + +func (db *DB) unlockWrite(overflow bool, merged int, err error) { + for i := 0; i < merged; i++ { + db.writeAckC <- err + } + if overflow { + // Pass lock to the next write (that failed to merge). + db.writeMergedC <- false + } else { + // Release lock. + <-db.writeLockC + } +} + +// ourBatch is batch that we can modify. +func (db *DB) writeLocked(batch, ourBatch *Batch, merge, sync bool) error { + // Try to flush memdb. This method would also trying to throttle writes + // if it is too fast and compaction cannot catch-up. + mdb, mdbFree, err := db.flush(batch.internalLen) + if err != nil { + db.unlockWrite(false, 0, err) + return err + } + defer mdb.decref() + + var ( + overflow bool + merged int + batches = []*Batch{batch} + ) + + if merge { + // Merge limit. + var mergeLimit int + if batch.internalLen > 128<<10 { + mergeLimit = (1 << 20) - batch.internalLen + } else { + mergeLimit = 128 << 10 + } + mergeCap := mdbFree - batch.internalLen + if mergeLimit > mergeCap { + mergeLimit = mergeCap + } + + merge: + for mergeLimit > 0 { + select { + case incoming := <-db.writeMergeC: + if incoming.batch != nil { + // Merge batch. + if incoming.batch.internalLen > mergeLimit { + overflow = true + break merge + } + batches = append(batches, incoming.batch) + mergeLimit -= incoming.batch.internalLen + } else { + // Merge put. + internalLen := len(incoming.key) + len(incoming.value) + 8 + if internalLen > mergeLimit { + overflow = true + break merge + } + if ourBatch == nil { + ourBatch = db.batchPool.Get().(*Batch) + ourBatch.Reset() + batches = append(batches, ourBatch) + } + // We can use same batch since concurrent write doesn't + // guarantee write order. + ourBatch.appendRec(incoming.keyType, incoming.key, incoming.value) + mergeLimit -= internalLen + } + sync = sync || incoming.sync + merged++ + db.writeMergedC <- true + + default: + break merge + } + } + } + + // Release ourBatch if any. + if ourBatch != nil { + defer db.batchPool.Put(ourBatch) + } + + // Seq number. + seq := db.seq + 1 + + // Write journal. + if err := db.writeJournal(batches, seq, sync); err != nil { + db.unlockWrite(overflow, merged, err) + return err + } + + // Put batches. + for _, batch := range batches { + if err := batch.putMem(seq, mdb.DB); err != nil { + panic(err) + } + seq += uint64(batch.Len()) + } + + // Incr seq number. + db.addSeq(uint64(batchesLen(batches))) + + // Rotate memdb if it's reach the threshold. + if batch.internalLen >= mdbFree { + db.rotateMem(0, false) + } + + db.unlockWrite(overflow, merged, nil) + return nil +} + +// Write apply the given batch to the DB. 
The batch records will be applied +// sequentially. Write might be used concurrently, when used concurrently and +// batch is small enough, write will try to merge the batches. Set NoWriteMerge +// option to true to disable write merge. +// +// It is safe to modify the contents of the arguments after Write returns but +// not before. Write will not modify content of the batch. +func (db *DB) Write(batch *Batch, wo *opt.WriteOptions) error { + if err := db.ok(); err != nil || batch == nil || batch.Len() == 0 { + return err + } + + // If the batch size is larger than write buffer, it may justified to write + // using transaction instead. Using transaction the batch will be written + // into tables directly, skipping the journaling. + if batch.internalLen > db.s.o.GetWriteBuffer() && !db.s.o.GetDisableLargeBatchTransaction() { + tr, err := db.OpenTransaction() + if err != nil { + return err + } + if err := tr.Write(batch, wo); err != nil { + tr.Discard() + return err + } + return tr.Commit() + } + + merge := !wo.GetNoWriteMerge() && !db.s.o.GetNoWriteMerge() + sync := wo.GetSync() && !db.s.o.GetNoSync() + + // Acquire write lock. + if merge { + select { + case db.writeMergeC <- writeMerge{sync: sync, batch: batch}: + if <-db.writeMergedC { + // Write is merged. + return <-db.writeAckC + } + // Write is not merged, the write lock is handed to us. Continue. + case db.writeLockC <- struct{}{}: + // Write lock acquired. + case err := <-db.compPerErrC: + // Compaction error. + return err + case <-db.closeC: + // Closed + return ErrClosed + } + } else { + select { + case db.writeLockC <- struct{}{}: + // Write lock acquired. + case err := <-db.compPerErrC: + // Compaction error. + return err + case <-db.closeC: + // Closed + return ErrClosed + } + } + + return db.writeLocked(batch, nil, merge, sync) +} + +func (db *DB) putRec(kt keyType, key, value []byte, wo *opt.WriteOptions) error { + if err := db.ok(); err != nil { + return err + } + + merge := !wo.GetNoWriteMerge() && !db.s.o.GetNoWriteMerge() + sync := wo.GetSync() && !db.s.o.GetNoSync() + + // Acquire write lock. + if merge { + select { + case db.writeMergeC <- writeMerge{sync: sync, keyType: kt, key: key, value: value}: + if <-db.writeMergedC { + // Write is merged. + return <-db.writeAckC + } + // Write is not merged, the write lock is handed to us. Continue. + case db.writeLockC <- struct{}{}: + // Write lock acquired. + case err := <-db.compPerErrC: + // Compaction error. + return err + case <-db.closeC: + // Closed + return ErrClosed + } + } else { + select { + case db.writeLockC <- struct{}{}: + // Write lock acquired. + case err := <-db.compPerErrC: + // Compaction error. + return err + case <-db.closeC: + // Closed + return ErrClosed + } + } + + batch := db.batchPool.Get().(*Batch) + batch.Reset() + batch.appendRec(kt, key, value) + return db.writeLocked(batch, batch, merge, sync) +} + +// Put sets the value for the given key. It overwrites any previous value +// for that key; a DB is not a multi-map. Write merge also applies for Put, see +// Write. +// +// It is safe to modify the contents of the arguments after Put returns but not +// before. +func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error { + return db.putRec(keyTypeVal, key, value, wo) +} + +// Delete deletes the value for the given key. Delete will not returns error if +// key doesn't exist. Write merge also applies for Delete, see Write. +// +// It is safe to modify the contents of the arguments after Delete returns but +// not before. 
+func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error { + return db.putRec(keyTypeDel, key, nil, wo) +} + +func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool { + iter := mem.NewIterator(nil) + defer iter.Release() + return (max == nil || (iter.First() && icmp.uCompare(max, internalKey(iter.Key()).ukey()) >= 0)) && + (min == nil || (iter.Last() && icmp.uCompare(min, internalKey(iter.Key()).ukey()) <= 0)) +} + +// CompactRange compacts the underlying DB for the given key range. +// In particular, deleted and overwritten versions are discarded, +// and the data is rearranged to reduce the cost of operations +// needed to access the data. This operation should typically only +// be invoked by users who understand the underlying implementation. +// +// A nil Range.Start is treated as a key before all keys in the DB. +// And a nil Range.Limit is treated as a key after all keys in the DB. +// Therefore if both is nil then it will compact entire DB. +func (db *DB) CompactRange(r util.Range) error { + if err := db.ok(); err != nil { + return err + } + + // Lock writer. + select { + case db.writeLockC <- struct{}{}: + case err := <-db.compPerErrC: + return err + case <-db.closeC: + return ErrClosed + } + + // Check for overlaps in memdb. + mdb := db.getEffectiveMem() + if mdb == nil { + return ErrClosed + } + defer mdb.decref() + if isMemOverlaps(db.s.icmp, mdb.DB, r.Start, r.Limit) { + // Memdb compaction. + if _, err := db.rotateMem(0, false); err != nil { + <-db.writeLockC + return err + } + <-db.writeLockC + if err := db.compTriggerWait(db.mcompCmdC); err != nil { + return err + } + } else { + <-db.writeLockC + } + + // Table compaction. + return db.compTriggerRange(db.tcompCmdC, -1, r.Start, r.Limit) +} + +// SetReadOnly makes DB read-only. It will stay read-only until reopened. +func (db *DB) SetReadOnly() error { + if err := db.ok(); err != nil { + return err + } + + // Lock writer. + select { + case db.writeLockC <- struct{}{}: + db.compWriteLocking = true + case err := <-db.compPerErrC: + return err + case <-db.closeC: + return ErrClosed + } + + // Set compaction read-only. + select { + case db.compErrSetC <- ErrReadOnly: + case perr := <-db.compPerErrC: + return perr + case <-db.closeC: + return ErrClosed + } + + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/doc.go b/vendor/github.com/syndtr/goleveldb/leveldb/doc.go new file mode 100644 index 0000000..be768e5 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/doc.go @@ -0,0 +1,92 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package leveldb provides implementation of LevelDB key/value database. +// +// Create or open a database: +// +// // The returned DB instance is safe for concurrent use. Which mean that all +// // DB's methods may be called concurrently from multiple goroutine. +// db, err := leveldb.OpenFile("path/to/db", nil) +// ... +// defer db.Close() +// ... +// +// Read or modify the database content: +// +// // Remember that the contents of the returned slice should not be modified. +// data, err := db.Get([]byte("key"), nil) +// ... +// err = db.Put([]byte("key"), []byte("value"), nil) +// ... +// err = db.Delete([]byte("key"), nil) +// ... 
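+//
+// Test whether a key exists (a sketch in the same style as the examples above):
+//
+//	ok, err := db.Has([]byte("key"), nil)
+//	...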
+// +// Iterate over database content: +// +// iter := db.NewIterator(nil, nil) +// for iter.Next() { +// // Remember that the contents of the returned slice should not be modified, and +// // only valid until the next call to Next. +// key := iter.Key() +// value := iter.Value() +// ... +// } +// iter.Release() +// err = iter.Error() +// ... +// +// Iterate over subset of database content with a particular prefix: +// iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil) +// for iter.Next() { +// // Use key/value. +// ... +// } +// iter.Release() +// err = iter.Error() +// ... +// +// Seek-then-Iterate: +// +// iter := db.NewIterator(nil, nil) +// for ok := iter.Seek(key); ok; ok = iter.Next() { +// // Use key/value. +// ... +// } +// iter.Release() +// err = iter.Error() +// ... +// +// Iterate over subset of database content: +// +// iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil) +// for iter.Next() { +// // Use key/value. +// ... +// } +// iter.Release() +// err = iter.Error() +// ... +// +// Batch writes: +// +// batch := new(leveldb.Batch) +// batch.Put([]byte("foo"), []byte("value")) +// batch.Put([]byte("bar"), []byte("another value")) +// batch.Delete([]byte("baz")) +// err = db.Write(batch, nil) +// ... +// +// Use bloom filter: +// +// o := &opt.Options{ +// Filter: filter.NewBloomFilter(10), +// } +// db, err := leveldb.OpenFile("path/to/db", o) +// ... +// defer db.Close() +// ... +package leveldb diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/errors.go b/vendor/github.com/syndtr/goleveldb/leveldb/errors.go new file mode 100644 index 0000000..de26498 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/errors.go @@ -0,0 +1,20 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "github.com/syndtr/goleveldb/leveldb/errors" +) + +// Common errors. +var ( + ErrNotFound = errors.ErrNotFound + ErrReadOnly = errors.New("leveldb: read-only mode") + ErrSnapshotReleased = errors.New("leveldb: snapshot released") + ErrIterReleased = errors.New("leveldb: iterator released") + ErrClosed = errors.New("leveldb: closed") +) diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go b/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go new file mode 100644 index 0000000..8d6146b --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go @@ -0,0 +1,78 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package errors provides common error types used throughout leveldb. +package errors + +import ( + "errors" + "fmt" + + "github.com/syndtr/goleveldb/leveldb/storage" + "github.com/syndtr/goleveldb/leveldb/util" +) + +// Common errors. +var ( + ErrNotFound = New("leveldb: not found") + ErrReleased = util.ErrReleased + ErrHasReleaser = util.ErrHasReleaser +) + +// New returns an error that formats as the given text. +func New(text string) error { + return errors.New(text) +} + +// ErrCorrupted is the type that wraps errors that indicate corruption in +// the database. 
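+//
+// Callers should detect it with the IsCorrupted helper below rather than a
+// direct type assertion; a minimal sketch:
+//
+//	if errors.IsCorrupted(err) {
+//		// data is damaged; the DB may need recovery, e.g. leveldb.RecoverFile
+//	}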
+type ErrCorrupted struct {
+	Fd  storage.FileDesc
+	Err error
+}
+
+func (e *ErrCorrupted) Error() string {
+	if !e.Fd.Zero() {
+		return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd)
+	}
+	return e.Err.Error()
+}
+
+// NewErrCorrupted creates a new ErrCorrupted error.
+func NewErrCorrupted(fd storage.FileDesc, err error) error {
+	return &ErrCorrupted{fd, err}
+}
+
+// IsCorrupted returns a boolean indicating whether the error indicates
+// a corruption.
+func IsCorrupted(err error) bool {
+	switch err.(type) {
+	case *ErrCorrupted:
+		return true
+	case *storage.ErrCorrupted:
+		return true
+	}
+	return false
+}
+
+// ErrMissingFiles is the type that indicates a corruption due to missing
+// files. ErrMissingFiles is always wrapped in ErrCorrupted.
+type ErrMissingFiles struct {
+	Fds []storage.FileDesc
+}
+
+func (e *ErrMissingFiles) Error() string { return "file missing" }
+
+// SetFd sets the 'file info' of the given error with the given file.
+// Currently only ErrCorrupted is supported; otherwise it does nothing.
+func SetFd(err error, fd storage.FileDesc) error {
+	switch x := err.(type) {
+	case *ErrCorrupted:
+		x.Fd = fd
+		return x
+	}
+	return err
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter.go
new file mode 100644
index 0000000..e961e42
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/filter.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"github.com/syndtr/goleveldb/leveldb/filter"
+)
+
+type iFilter struct {
+	filter.Filter
+}
+
+func (f iFilter) Contains(filter, key []byte) bool {
+	return f.Filter.Contains(filter, internalKey(key).ukey())
+}
+
+func (f iFilter) NewGenerator() filter.FilterGenerator {
+	return iFilterGenerator{f.Filter.NewGenerator()}
+}
+
+type iFilterGenerator struct {
+	filter.FilterGenerator
+}
+
+func (g iFilterGenerator) Add(key []byte) {
+	g.FilterGenerator.Add(internalKey(key).ukey())
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go
new file mode 100644
index 0000000..bab0e99
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go
@@ -0,0 +1,116 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package filter
+
+import (
+	"github.com/syndtr/goleveldb/leveldb/util"
+)
+
+func bloomHash(key []byte) uint32 {
+	return util.Hash(key, 0xbc9f1d34)
+}
+
+type bloomFilter int
+
+// The bloom filter serializes its parameters and is backward compatible
+// with respect to them. Therefore, its parameters are not added to its
+// name.
+func (bloomFilter) Name() string {
+	return "leveldb.BuiltinBloomFilter"
+}
+
+func (f bloomFilter) Contains(filter, key []byte) bool {
+	nBytes := len(filter) - 1
+	if nBytes < 1 {
+		return false
+	}
+	nBits := uint32(nBytes * 8)
+
+	// Use the encoded k so that we can read filters generated by
+	// bloom filters created using different parameters.
+	k := filter[nBytes]
+	if k > 30 {
+		// Reserved for potentially new encodings for short bloom filters.
+		// Consider it a match.
+ return true + } + + kh := bloomHash(key) + delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits + for j := uint8(0); j < k; j++ { + bitpos := kh % nBits + if (uint32(filter[bitpos/8]) & (1 << (bitpos % 8))) == 0 { + return false + } + kh += delta + } + return true +} + +func (f bloomFilter) NewGenerator() FilterGenerator { + // Round down to reduce probing cost a little bit. + k := uint8(f * 69 / 100) // 0.69 =~ ln(2) + if k < 1 { + k = 1 + } else if k > 30 { + k = 30 + } + return &bloomFilterGenerator{ + n: int(f), + k: k, + } +} + +type bloomFilterGenerator struct { + n int + k uint8 + + keyHashes []uint32 +} + +func (g *bloomFilterGenerator) Add(key []byte) { + // Use double-hashing to generate a sequence of hash values. + // See analysis in [Kirsch,Mitzenmacher 2006]. + g.keyHashes = append(g.keyHashes, bloomHash(key)) +} + +func (g *bloomFilterGenerator) Generate(b Buffer) { + // Compute bloom filter size (in both bits and bytes) + nBits := uint32(len(g.keyHashes) * g.n) + // For small n, we can see a very high false positive rate. Fix it + // by enforcing a minimum bloom filter length. + if nBits < 64 { + nBits = 64 + } + nBytes := (nBits + 7) / 8 + nBits = nBytes * 8 + + dest := b.Alloc(int(nBytes) + 1) + dest[nBytes] = g.k + for _, kh := range g.keyHashes { + delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits + for j := uint8(0); j < g.k; j++ { + bitpos := kh % nBits + dest[bitpos/8] |= (1 << (bitpos % 8)) + kh += delta + } + } + + g.keyHashes = g.keyHashes[:0] +} + +// NewBloomFilter creates a new initialized bloom filter for given +// bitsPerKey. +// +// Since bitsPerKey is persisted individually for each bloom filter +// serialization, bloom filters are backwards compatible with respect to +// changing bitsPerKey. This means that no big performance penalty will +// be experienced when changing the parameter. See documentation for +// opt.Options.Filter for more information. +func NewBloomFilter(bitsPerKey int) Filter { + return bloomFilter(bitsPerKey) +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go new file mode 100644 index 0000000..7a925c5 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go @@ -0,0 +1,60 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package filter provides interface and implementation of probabilistic +// data structure. +// +// The filter is resposible for creating small filter from a set of keys. +// These filter will then used to test whether a key is a member of the set. +// In many cases, a filter can cut down the number of disk seeks from a +// handful to a single disk seek per DB.Get call. +package filter + +// Buffer is the interface that wraps basic Alloc, Write and WriteByte methods. +type Buffer interface { + // Alloc allocs n bytes of slice from the buffer. This also advancing + // write offset. + Alloc(n int) []byte + + // Write appends the contents of p to the buffer. + Write(p []byte) (n int, err error) + + // WriteByte appends the byte c to the buffer. + WriteByte(c byte) error +} + +// Filter is the filter. +type Filter interface { + // Name returns the name of this policy. + // + // Note that if the filter encoding changes in an incompatible way, + // the name returned by this method must be changed. 
Otherwise, old
+	// incompatible filters may be passed to methods of this type.
+	Name() string
+
+	// NewGenerator creates a new filter generator.
+	NewGenerator() FilterGenerator
+
+	// Contains returns true if the filter contains the given key.
+	//
+	// The filter argument is a filter generated by the filter generator.
+	Contains(filter, key []byte) bool
+}
+
+// FilterGenerator is the filter generator.
+type FilterGenerator interface {
+	// Add adds a key to the filter generator.
+	//
+	// The key may become invalid after the call to this method ends, therefore
+	// the key must be copied if the implementation requires keeping it for
+	// later use. The key should not be modified directly; doing so may cause
+	// undefined results.
+	Add(key []byte)
+
+	// Generate generates filters based on keys passed so far. After a call
+	// to Generate the filter generator may be reset, depending on the
+	// implementation.
+	Generate(b Buffer)
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go
new file mode 100644
index 0000000..a23ab05
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go
@@ -0,0 +1,184 @@
+// Copyright (c) 2014, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator
+
+import (
+	"github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// BasicArray is the interface that wraps the basic Len and Search methods.
+type BasicArray interface {
+	// Len returns the length of the array.
+	Len() int
+
+	// Search finds the smallest index that points to a key that is greater
+	// than or equal to the given key.
+	Search(key []byte) int
+}
+
+// Array is the interface that wraps BasicArray and the basic Index method.
+type Array interface {
+	BasicArray
+
+	// Index returns the key/value pair with index of i.
+	Index(i int) (key, value []byte)
+}
+
+// ArrayIndexer is the interface that wraps BasicArray and the basic Get
+// method.
+type ArrayIndexer interface {
+	BasicArray
+
+	// Get returns a new data iterator with index of i.
+ Get(i int) Iterator +} + +type basicArrayIterator struct { + util.BasicReleaser + array BasicArray + pos int + err error +} + +func (i *basicArrayIterator) Valid() bool { + return i.pos >= 0 && i.pos < i.array.Len() && !i.Released() +} + +func (i *basicArrayIterator) First() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + if i.array.Len() == 0 { + i.pos = -1 + return false + } + i.pos = 0 + return true +} + +func (i *basicArrayIterator) Last() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + n := i.array.Len() + if n == 0 { + i.pos = 0 + return false + } + i.pos = n - 1 + return true +} + +func (i *basicArrayIterator) Seek(key []byte) bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + n := i.array.Len() + if n == 0 { + i.pos = 0 + return false + } + i.pos = i.array.Search(key) + if i.pos >= n { + return false + } + return true +} + +func (i *basicArrayIterator) Next() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + i.pos++ + if n := i.array.Len(); i.pos >= n { + i.pos = n + return false + } + return true +} + +func (i *basicArrayIterator) Prev() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + i.pos-- + if i.pos < 0 { + i.pos = -1 + return false + } + return true +} + +func (i *basicArrayIterator) Error() error { return i.err } + +type arrayIterator struct { + basicArrayIterator + array Array + pos int + key, value []byte +} + +func (i *arrayIterator) updateKV() { + if i.pos == i.basicArrayIterator.pos { + return + } + i.pos = i.basicArrayIterator.pos + if i.Valid() { + i.key, i.value = i.array.Index(i.pos) + } else { + i.key = nil + i.value = nil + } +} + +func (i *arrayIterator) Key() []byte { + i.updateKV() + return i.key +} + +func (i *arrayIterator) Value() []byte { + i.updateKV() + return i.value +} + +type arrayIteratorIndexer struct { + basicArrayIterator + array ArrayIndexer +} + +func (i *arrayIteratorIndexer) Get() Iterator { + if i.Valid() { + return i.array.Get(i.basicArrayIterator.pos) + } + return nil +} + +// NewArrayIterator returns an iterator from the given array. +func NewArrayIterator(array Array) Iterator { + return &arrayIterator{ + basicArrayIterator: basicArrayIterator{array: array, pos: -1}, + array: array, + pos: -1, + } +} + +// NewArrayIndexer returns an index iterator from the given array. +func NewArrayIndexer(array ArrayIndexer) IteratorIndexer { + return &arrayIteratorIndexer{ + basicArrayIterator: basicArrayIterator{array: array, pos: -1}, + array: array, + } +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go new file mode 100644 index 0000000..939adbb --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go @@ -0,0 +1,242 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package iterator + +import ( + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/util" +) + +// IteratorIndexer is the interface that wraps CommonIterator and basic Get +// method. IteratorIndexer provides index for indexed iterator. +type IteratorIndexer interface { + CommonIterator + + // Get returns a new data iterator for the current position, or nil if + // done. 
+ Get() Iterator +} + +type indexedIterator struct { + util.BasicReleaser + index IteratorIndexer + strict bool + + data Iterator + err error + errf func(err error) + closed bool +} + +func (i *indexedIterator) setData() { + if i.data != nil { + i.data.Release() + } + i.data = i.index.Get() +} + +func (i *indexedIterator) clearData() { + if i.data != nil { + i.data.Release() + } + i.data = nil +} + +func (i *indexedIterator) indexErr() { + if err := i.index.Error(); err != nil { + if i.errf != nil { + i.errf(err) + } + i.err = err + } +} + +func (i *indexedIterator) dataErr() bool { + if err := i.data.Error(); err != nil { + if i.errf != nil { + i.errf(err) + } + if i.strict || !errors.IsCorrupted(err) { + i.err = err + return true + } + } + return false +} + +func (i *indexedIterator) Valid() bool { + return i.data != nil && i.data.Valid() +} + +func (i *indexedIterator) First() bool { + if i.err != nil { + return false + } else if i.Released() { + i.err = ErrIterReleased + return false + } + + if !i.index.First() { + i.indexErr() + i.clearData() + return false + } + i.setData() + return i.Next() +} + +func (i *indexedIterator) Last() bool { + if i.err != nil { + return false + } else if i.Released() { + i.err = ErrIterReleased + return false + } + + if !i.index.Last() { + i.indexErr() + i.clearData() + return false + } + i.setData() + if !i.data.Last() { + if i.dataErr() { + return false + } + i.clearData() + return i.Prev() + } + return true +} + +func (i *indexedIterator) Seek(key []byte) bool { + if i.err != nil { + return false + } else if i.Released() { + i.err = ErrIterReleased + return false + } + + if !i.index.Seek(key) { + i.indexErr() + i.clearData() + return false + } + i.setData() + if !i.data.Seek(key) { + if i.dataErr() { + return false + } + i.clearData() + return i.Next() + } + return true +} + +func (i *indexedIterator) Next() bool { + if i.err != nil { + return false + } else if i.Released() { + i.err = ErrIterReleased + return false + } + + switch { + case i.data != nil && !i.data.Next(): + if i.dataErr() { + return false + } + i.clearData() + fallthrough + case i.data == nil: + if !i.index.Next() { + i.indexErr() + return false + } + i.setData() + return i.Next() + } + return true +} + +func (i *indexedIterator) Prev() bool { + if i.err != nil { + return false + } else if i.Released() { + i.err = ErrIterReleased + return false + } + + switch { + case i.data != nil && !i.data.Prev(): + if i.dataErr() { + return false + } + i.clearData() + fallthrough + case i.data == nil: + if !i.index.Prev() { + i.indexErr() + return false + } + i.setData() + if !i.data.Last() { + if i.dataErr() { + return false + } + i.clearData() + return i.Prev() + } + } + return true +} + +func (i *indexedIterator) Key() []byte { + if i.data == nil { + return nil + } + return i.data.Key() +} + +func (i *indexedIterator) Value() []byte { + if i.data == nil { + return nil + } + return i.data.Value() +} + +func (i *indexedIterator) Release() { + i.clearData() + i.index.Release() + i.BasicReleaser.Release() +} + +func (i *indexedIterator) Error() error { + if i.err != nil { + return i.err + } + if err := i.index.Error(); err != nil { + return err + } + return nil +} + +func (i *indexedIterator) SetErrorCallback(f func(err error)) { + i.errf = f +} + +// NewIndexedIterator returns an 'indexed iterator'. An index is iterator +// that returns another iterator, a 'data iterator'. A 'data iterator' is the +// iterator that contains actual key/value pairs. 
+//
+// If strict is true then any 'corruption errors' (i.e. errors.IsCorrupted(err) == true)
+// won't be ignored and will halt the 'indexed iterator'; otherwise the iterator
+// will continue to the next 'data iterator'. Corruption on the 'index iterator'
+// will not be ignored and will halt the iterator.
+func NewIndexedIterator(index IteratorIndexer, strict bool) Iterator {
+	return &indexedIterator{index: index, strict: strict}
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
new file mode 100644
index 0000000..96fb0f6
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
@@ -0,0 +1,132 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package iterator provides interfaces and implementations to traverse over
+// the contents of a database.
+package iterator
+
+import (
+	"errors"
+
+	"github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+	ErrIterReleased = errors.New("leveldb/iterator: iterator released")
+)
+
+// IteratorSeeker is the interface that wraps the 'seek methods'.
+type IteratorSeeker interface {
+	// First moves the iterator to the first key/value pair. If the iterator
+	// only contains one key/value pair then First and Last would move
+	// to the same key/value pair.
+	// It returns whether such a pair exists.
+	First() bool
+
+	// Last moves the iterator to the last key/value pair. If the iterator
+	// only contains one key/value pair then First and Last would move
+	// to the same key/value pair.
+	// It returns whether such a pair exists.
+	Last() bool
+
+	// Seek moves the iterator to the first key/value pair whose key is greater
+	// than or equal to the given key.
+	// It returns whether such a pair exists.
+	//
+	// It is safe to modify the contents of the argument after Seek returns.
+	Seek(key []byte) bool
+
+	// Next moves the iterator to the next key/value pair.
+	// It returns false if the iterator is exhausted.
+	Next() bool
+
+	// Prev moves the iterator to the previous key/value pair.
+	// It returns false if the iterator is exhausted.
+	Prev() bool
+}
+
+// CommonIterator is the interface that wraps common iterator methods.
+type CommonIterator interface {
+	IteratorSeeker
+
+	// util.Releaser is the interface that wraps the basic Release method.
+	// When called, Release releases any resources associated with the
+	// iterator.
+	util.Releaser
+
+	// util.ReleaseSetter is the interface that wraps the basic SetReleaser
+	// method.
+	util.ReleaseSetter
+
+	// TODO: Remove this when ready.
+	Valid() bool
+
+	// Error returns any accumulated error. Exhausting all the key/value pairs
+	// is not considered to be an error.
+	Error() error
+}
+
+// Iterator iterates over a DB's key/value pairs in key order.
+//
+// When it encounters an error, any 'seek method' will return false and will
+// yield no key/value pairs. The error can be queried by calling the Error
+// method. Calling Release is still necessary.
+//
+// An iterator must be released after use, but it is not necessary to read
+// an iterator until exhaustion.
+// Also, an iterator is not necessarily safe for concurrent use, but it is
+// safe to use multiple iterators concurrently, with each in a dedicated
+// goroutine.
+type Iterator interface {
+	CommonIterator
+
+	// Key returns the key of the current key/value pair, or nil if done.
+ // The caller should not modify the contents of the returned slice, and
+ // its contents may change on the next call to any 'seek method'.
+ Key() []byte
+
+ // Value returns the value of the current key/value pair, or nil if done.
+ // The caller should not modify the contents of the returned slice, and
+ // its contents may change on the next call to any 'seek method'.
+ Value() []byte
+}
+
+// ErrorCallbackSetter is the interface that wraps the basic SetErrorCallback
+// method.
+//
+// ErrorCallbackSetter is implemented by the indexed and merged iterators.
+type ErrorCallbackSetter interface {
+ // SetErrorCallback sets an error callback for the corresponding
+ // iterator. Use nil to clear the callback.
+ SetErrorCallback(f func(err error))
+}
+
+type emptyIterator struct {
+ util.BasicReleaser
+ err error
+}
+
+func (i *emptyIterator) rErr() {
+ if i.err == nil && i.Released() {
+ i.err = ErrIterReleased
+ }
+}
+
+func (*emptyIterator) Valid() bool { return false }
+func (i *emptyIterator) First() bool { i.rErr(); return false }
+func (i *emptyIterator) Last() bool { i.rErr(); return false }
+func (i *emptyIterator) Seek(key []byte) bool { i.rErr(); return false }
+func (i *emptyIterator) Next() bool { i.rErr(); return false }
+func (i *emptyIterator) Prev() bool { i.rErr(); return false }
+func (*emptyIterator) Key() []byte { return nil }
+func (*emptyIterator) Value() []byte { return nil }
+func (i *emptyIterator) Error() error { return i.err }
+
+// NewEmptyIterator creates an empty iterator. The err parameter can be
+// nil, but if not nil the given err will be returned by the Error method.
+func NewEmptyIterator(err error) Iterator {
+ return &emptyIterator{err: err}
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go
new file mode 100644
index 0000000..1a7e29d
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go
@@ -0,0 +1,304 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
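+
+// Editor's note: a minimal usage sketch of the merged iterator defined in
+// this file, assuming two already-populated source iterators a and b;
+// illustrative only, not part of the upstream API:
+//
+//	it := iterator.NewMergedIterator([]iterator.Iterator{a, b}, comparer.DefaultComparer, true)
+//	for it.Next() {
+//		// it.Key()/it.Value() yield the union of a and b in key order.
+//	}
+//	it.Release()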
+ +package iterator + +import ( + "github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/util" +) + +type dir int + +const ( + dirReleased dir = iota - 1 + dirSOI + dirEOI + dirBackward + dirForward +) + +type mergedIterator struct { + cmp comparer.Comparer + iters []Iterator + strict bool + + keys [][]byte + index int + dir dir + err error + errf func(err error) + releaser util.Releaser +} + +func assertKey(key []byte) []byte { + if key == nil { + panic("leveldb/iterator: nil key") + } + return key +} + +func (i *mergedIterator) iterErr(iter Iterator) bool { + if err := iter.Error(); err != nil { + if i.errf != nil { + i.errf(err) + } + if i.strict || !errors.IsCorrupted(err) { + i.err = err + return true + } + } + return false +} + +func (i *mergedIterator) Valid() bool { + return i.err == nil && i.dir > dirEOI +} + +func (i *mergedIterator) First() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + for x, iter := range i.iters { + switch { + case iter.First(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + } + i.dir = dirSOI + return i.next() +} + +func (i *mergedIterator) Last() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + for x, iter := range i.iters { + switch { + case iter.Last(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + } + i.dir = dirEOI + return i.prev() +} + +func (i *mergedIterator) Seek(key []byte) bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + for x, iter := range i.iters { + switch { + case iter.Seek(key): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + } + i.dir = dirSOI + return i.next() +} + +func (i *mergedIterator) next() bool { + var key []byte + if i.dir == dirForward { + key = i.keys[i.index] + } + for x, tkey := range i.keys { + if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) < 0) { + key = tkey + i.index = x + } + } + if key == nil { + i.dir = dirEOI + return false + } + i.dir = dirForward + return true +} + +func (i *mergedIterator) Next() bool { + if i.dir == dirEOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + switch i.dir { + case dirSOI: + return i.First() + case dirBackward: + key := append([]byte{}, i.keys[i.index]...) 
+ if !i.Seek(key) { + return false + } + return i.Next() + } + + x := i.index + iter := i.iters[x] + switch { + case iter.Next(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + return i.next() +} + +func (i *mergedIterator) prev() bool { + var key []byte + if i.dir == dirBackward { + key = i.keys[i.index] + } + for x, tkey := range i.keys { + if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) > 0) { + key = tkey + i.index = x + } + } + if key == nil { + i.dir = dirSOI + return false + } + i.dir = dirBackward + return true +} + +func (i *mergedIterator) Prev() bool { + if i.dir == dirSOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + switch i.dir { + case dirEOI: + return i.Last() + case dirForward: + key := append([]byte{}, i.keys[i.index]...) + for x, iter := range i.iters { + if x == i.index { + continue + } + seek := iter.Seek(key) + switch { + case seek && iter.Prev(), !seek && iter.Last(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + } + } + + x := i.index + iter := i.iters[x] + switch { + case iter.Prev(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + return i.prev() +} + +func (i *mergedIterator) Key() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.keys[i.index] +} + +func (i *mergedIterator) Value() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.iters[i.index].Value() +} + +func (i *mergedIterator) Release() { + if i.dir != dirReleased { + i.dir = dirReleased + for _, iter := range i.iters { + iter.Release() + } + i.iters = nil + i.keys = nil + if i.releaser != nil { + i.releaser.Release() + i.releaser = nil + } + } +} + +func (i *mergedIterator) SetReleaser(releaser util.Releaser) { + if i.dir == dirReleased { + panic(util.ErrReleased) + } + if i.releaser != nil && releaser != nil { + panic(util.ErrHasReleaser) + } + i.releaser = releaser +} + +func (i *mergedIterator) Error() error { + return i.err +} + +func (i *mergedIterator) SetErrorCallback(f func(err error)) { + i.errf = f +} + +// NewMergedIterator returns an iterator that merges its input. Walking the +// resultant iterator will return all key/value pairs of all input iterators +// in strictly increasing key order, as defined by cmp. +// The input's key ranges may overlap, but there are assumed to be no duplicate +// keys: if iters[i] contains a key k then iters[j] will not contain that key k. +// None of the iters may be nil. +// +// If strict is true the any 'corruption errors' (i.e errors.IsCorrupted(err) == true) +// won't be ignored and will halt 'merged iterator', otherwise the iterator will +// continue to the next 'input iterator'. +func NewMergedIterator(iters []Iterator, cmp comparer.Comparer, strict bool) Iterator { + return &mergedIterator{ + iters: iters, + cmp: cmp, + strict: strict, + keys: make([][]byte, len(iters)), + } +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go b/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go new file mode 100644 index 0000000..d094c3d --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go @@ -0,0 +1,524 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record.go?r=1d5ccbe03246da926391ee12d1c6caae054ff4b0
+// License, author and contributor information can be found at the URLs below, respectively:
+// https://code.google.com/p/leveldb-go/source/browse/LICENSE
+// https://code.google.com/p/leveldb-go/source/browse/AUTHORS
+// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS
+
+// Package journal reads and writes sequences of journals. Each journal is a stream
+// of bytes that completes before the next journal starts.
+//
+// When reading, call Next to obtain an io.Reader for the next journal. Next will
+// return io.EOF when there are no more journals. It is valid to call Next
+// without reading the current journal to exhaustion.
+//
+// When writing, call Next to obtain an io.Writer for the next journal. Calling
+// Next finishes the current journal. Call Close to finish the final journal.
+//
+// Optionally, call Flush to finish the current journal and flush the underlying
+// writer without starting a new journal. To start a new journal after flushing,
+// call Next.
+//
+// Neither Readers nor Writers are safe to use concurrently.
+//
+// Example code:
+// func read(r io.Reader) ([]string, error) {
+// var ss []string
+// journals := journal.NewReader(r, nil, true, true)
+// for {
+// j, err := journals.Next()
+// if err == io.EOF {
+// break
+// }
+// if err != nil {
+// return nil, err
+// }
+// s, err := ioutil.ReadAll(j)
+// if err != nil {
+// return nil, err
+// }
+// ss = append(ss, string(s))
+// }
+// return ss, nil
+// }
+//
+// func write(w io.Writer, ss []string) error {
+// journals := journal.NewWriter(w)
+// for _, s := range ss {
+// j, err := journals.Next()
+// if err != nil {
+// return err
+// }
+// if _, err := j.Write([]byte(s)); err != nil {
+// return err
+// }
+// }
+// return journals.Close()
+// }
+//
+// The wire format is that the stream is divided into 32 KiB blocks, and each
+// block contains a number of tightly packed chunks. Chunks cannot cross block
+// boundaries. The last block may be shorter than 32 KiB. Any unused bytes in a
+// block must be zero.
+//
+// A journal maps to one or more chunks. Each chunk has a 7-byte header (a 4-byte
+// checksum, a 2-byte little-endian uint16 length, and a 1-byte chunk type)
+// followed by a payload. The checksum is over the chunk type and the payload.
+//
+// There are four chunk types: whether the chunk is the full journal, or the
+// first, middle or last chunk of a multi-chunk journal. A multi-chunk journal
+// has one first chunk, zero or more middle chunks, and one last chunk.
+//
+// The wire format allows for limited recovery in the face of data corruption:
+// on a format error (such as a checksum mismatch), the reader moves to the
+// next block and looks for the next full or first chunk.
+package journal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// These constants are part of the wire format and should not be changed.
+const (
+ fullChunkType = 1
+ firstChunkType = 2
+ middleChunkType = 3
+ lastChunkType = 4
+)
+
+const (
+ blockSize = 32 * 1024
+ headerSize = 7
+)
+
+type flusher interface {
+ Flush() error
+}
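+
+// Editor's note: a worked sketch of the 7-byte chunk header described above,
+// using only identifiers defined in this file (payload is hypothetical);
+// illustrative only, not part of the upstream API:
+//
+//	var hdr [headerSize]byte
+//	hdr[6] = fullChunkType                                        // 1-byte chunk type
+//	binary.LittleEndian.PutUint16(hdr[4:6], uint16(len(payload))) // 2-byte payload length
+//	// The 4-byte checksum covers the chunk type and the payload:
+//	sum := util.NewCRC(append([]byte{fullChunkType}, payload...)).Value()
+//	binary.LittleEndian.PutUint32(hdr[0:4], sum)
+
+// ErrCorrupted is the error type generated by a corrupted block or chunk.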
+type ErrCorrupted struct { + Size int + Reason string +} + +func (e *ErrCorrupted) Error() string { + return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size) +} + +// Dropper is the interface that wrap simple Drop method. The Drop +// method will be called when the journal reader dropping a block or chunk. +type Dropper interface { + Drop(err error) +} + +// Reader reads journals from an underlying io.Reader. +type Reader struct { + // r is the underlying reader. + r io.Reader + // the dropper. + dropper Dropper + // strict flag. + strict bool + // checksum flag. + checksum bool + // seq is the sequence number of the current journal. + seq int + // buf[i:j] is the unread portion of the current chunk's payload. + // The low bound, i, excludes the chunk header. + i, j int + // n is the number of bytes of buf that are valid. Once reading has started, + // only the final block can have n < blockSize. + n int + // last is whether the current chunk is the last chunk of the journal. + last bool + // err is any accumulated error. + err error + // buf is the buffer. + buf [blockSize]byte +} + +// NewReader returns a new reader. The dropper may be nil, and if +// strict is true then corrupted or invalid chunk will halt the journal +// reader entirely. +func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader { + return &Reader{ + r: r, + dropper: dropper, + strict: strict, + checksum: checksum, + last: true, + } +} + +var errSkip = errors.New("leveldb/journal: skipped") + +func (r *Reader) corrupt(n int, reason string, skip bool) error { + if r.dropper != nil { + r.dropper.Drop(&ErrCorrupted{n, reason}) + } + if r.strict && !skip { + r.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrCorrupted{n, reason}) + return r.err + } + return errSkip +} + +// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the +// next block into the buffer if necessary. +func (r *Reader) nextChunk(first bool) error { + for { + if r.j+headerSize <= r.n { + checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4]) + length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6]) + chunkType := r.buf[r.j+6] + unprocBlock := r.n - r.j + if checksum == 0 && length == 0 && chunkType == 0 { + // Drop entire block. + r.i = r.n + r.j = r.n + return r.corrupt(unprocBlock, "zero header", false) + } + if chunkType < fullChunkType || chunkType > lastChunkType { + // Drop entire block. + r.i = r.n + r.j = r.n + return r.corrupt(unprocBlock, fmt.Sprintf("invalid chunk type %#x", chunkType), false) + } + r.i = r.j + headerSize + r.j = r.j + headerSize + int(length) + if r.j > r.n { + // Drop entire block. + r.i = r.n + r.j = r.n + return r.corrupt(unprocBlock, "chunk length overflows block", false) + } else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() { + // Drop entire block. + r.i = r.n + r.j = r.n + return r.corrupt(unprocBlock, "checksum mismatch", false) + } + if first && chunkType != fullChunkType && chunkType != firstChunkType { + chunkLength := (r.j - r.i) + headerSize + r.i = r.j + // Report the error, but skip it. + return r.corrupt(chunkLength, "orphan chunk", true) + } + r.last = chunkType == fullChunkType || chunkType == lastChunkType + return nil + } + + // The last block. + if r.n < blockSize && r.n > 0 { + if !first { + return r.corrupt(0, "missing chunk part", false) + } + r.err = io.EOF + return r.err + } + + // Read block. 
+ n, err := io.ReadFull(r.r, r.buf[:]) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return err + } + if n == 0 { + if !first { + return r.corrupt(0, "missing chunk part", false) + } + r.err = io.EOF + return r.err + } + r.i, r.j, r.n = 0, 0, n + } +} + +// Next returns a reader for the next journal. It returns io.EOF if there are no +// more journals. The reader returned becomes stale after the next Next call, +// and should no longer be used. If strict is false, the reader will returns +// io.ErrUnexpectedEOF error when found corrupted journal. +func (r *Reader) Next() (io.Reader, error) { + r.seq++ + if r.err != nil { + return nil, r.err + } + r.i = r.j + for { + if err := r.nextChunk(true); err == nil { + break + } else if err != errSkip { + return nil, err + } + } + return &singleReader{r, r.seq, nil}, nil +} + +// Reset resets the journal reader, allows reuse of the journal reader. Reset returns +// last accumulated error. +func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error { + r.seq++ + err := r.err + r.r = reader + r.dropper = dropper + r.strict = strict + r.checksum = checksum + r.i = 0 + r.j = 0 + r.n = 0 + r.last = true + r.err = nil + return err +} + +type singleReader struct { + r *Reader + seq int + err error +} + +func (x *singleReader) Read(p []byte) (int, error) { + r := x.r + if r.seq != x.seq { + return 0, errors.New("leveldb/journal: stale reader") + } + if x.err != nil { + return 0, x.err + } + if r.err != nil { + return 0, r.err + } + for r.i == r.j { + if r.last { + return 0, io.EOF + } + x.err = r.nextChunk(false) + if x.err != nil { + if x.err == errSkip { + x.err = io.ErrUnexpectedEOF + } + return 0, x.err + } + } + n := copy(p, r.buf[r.i:r.j]) + r.i += n + return n, nil +} + +func (x *singleReader) ReadByte() (byte, error) { + r := x.r + if r.seq != x.seq { + return 0, errors.New("leveldb/journal: stale reader") + } + if x.err != nil { + return 0, x.err + } + if r.err != nil { + return 0, r.err + } + for r.i == r.j { + if r.last { + return 0, io.EOF + } + x.err = r.nextChunk(false) + if x.err != nil { + if x.err == errSkip { + x.err = io.ErrUnexpectedEOF + } + return 0, x.err + } + } + c := r.buf[r.i] + r.i++ + return c, nil +} + +// Writer writes journals to an underlying io.Writer. +type Writer struct { + // w is the underlying writer. + w io.Writer + // seq is the sequence number of the current journal. + seq int + // f is w as a flusher. + f flusher + // buf[i:j] is the bytes that will become the current chunk. + // The low bound, i, includes the chunk header. + i, j int + // buf[:written] has already been written to w. + // written is zero unless Flush has been called. + written int + // first is whether the current chunk is the first chunk of the journal. + first bool + // pending is whether a chunk is buffered but not yet written. + pending bool + // err is any accumulated error. + err error + // buf is the buffer. + buf [blockSize]byte +} + +// NewWriter returns a new Writer. +func NewWriter(w io.Writer) *Writer { + f, _ := w.(flusher) + return &Writer{ + w: w, + f: f, + } +} + +// fillHeader fills in the header for the pending chunk. 
+func (w *Writer) fillHeader(last bool) { + if w.i+headerSize > w.j || w.j > blockSize { + panic("leveldb/journal: bad writer state") + } + if last { + if w.first { + w.buf[w.i+6] = fullChunkType + } else { + w.buf[w.i+6] = lastChunkType + } + } else { + if w.first { + w.buf[w.i+6] = firstChunkType + } else { + w.buf[w.i+6] = middleChunkType + } + } + binary.LittleEndian.PutUint32(w.buf[w.i+0:w.i+4], util.NewCRC(w.buf[w.i+6:w.j]).Value()) + binary.LittleEndian.PutUint16(w.buf[w.i+4:w.i+6], uint16(w.j-w.i-headerSize)) +} + +// writeBlock writes the buffered block to the underlying writer, and reserves +// space for the next chunk's header. +func (w *Writer) writeBlock() { + _, w.err = w.w.Write(w.buf[w.written:]) + w.i = 0 + w.j = headerSize + w.written = 0 +} + +// writePending finishes the current journal and writes the buffer to the +// underlying writer. +func (w *Writer) writePending() { + if w.err != nil { + return + } + if w.pending { + w.fillHeader(true) + w.pending = false + } + _, w.err = w.w.Write(w.buf[w.written:w.j]) + w.written = w.j +} + +// Close finishes the current journal and closes the writer. +func (w *Writer) Close() error { + w.seq++ + w.writePending() + if w.err != nil { + return w.err + } + w.err = errors.New("leveldb/journal: closed Writer") + return nil +} + +// Flush finishes the current journal, writes to the underlying writer, and +// flushes it if that writer implements interface{ Flush() error }. +func (w *Writer) Flush() error { + w.seq++ + w.writePending() + if w.err != nil { + return w.err + } + if w.f != nil { + w.err = w.f.Flush() + return w.err + } + return nil +} + +// Reset resets the journal writer, allows reuse of the journal writer. Reset +// will also closes the journal writer if not already. +func (w *Writer) Reset(writer io.Writer) (err error) { + w.seq++ + if w.err == nil { + w.writePending() + err = w.err + } + w.w = writer + w.f, _ = writer.(flusher) + w.i = 0 + w.j = 0 + w.written = 0 + w.first = false + w.pending = false + w.err = nil + return +} + +// Next returns a writer for the next journal. The writer returned becomes stale +// after the next Close, Flush or Next call, and should no longer be used. +func (w *Writer) Next() (io.Writer, error) { + w.seq++ + if w.err != nil { + return nil, w.err + } + if w.pending { + w.fillHeader(true) + } + w.i = w.j + w.j = w.j + headerSize + // Check if there is room in the block for the header. + if w.j > blockSize { + // Fill in the rest of the block with zeroes. + for k := w.i; k < blockSize; k++ { + w.buf[k] = 0 + } + w.writeBlock() + if w.err != nil { + return nil, w.err + } + } + w.first = true + w.pending = true + return singleWriter{w, w.seq}, nil +} + +type singleWriter struct { + w *Writer + seq int +} + +func (x singleWriter) Write(p []byte) (int, error) { + w := x.w + if w.seq != x.seq { + return 0, errors.New("leveldb/journal: stale writer") + } + if w.err != nil { + return 0, w.err + } + n0 := len(p) + for len(p) > 0 { + // Write a block, if it is full. + if w.j == blockSize { + w.fillHeader(false) + w.writeBlock() + if w.err != nil { + return 0, w.err + } + w.first = false + } + // Copy bytes into the buffer. 
+ n := copy(w.buf[w.j:], p)
+ w.j += n
+ p = p[n:]
+ }
+ return n0, nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/key.go b/vendor/github.com/syndtr/goleveldb/leveldb/key.go
new file mode 100644
index 0000000..ad8f51e
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/key.go
@@ -0,0 +1,143 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+// ErrInternalKeyCorrupted records internal key corruption.
+type ErrInternalKeyCorrupted struct {
+ Ikey []byte
+ Reason string
+}
+
+func (e *ErrInternalKeyCorrupted) Error() string {
+ return fmt.Sprintf("leveldb: internal key %q corrupted: %s", e.Ikey, e.Reason)
+}
+
+func newErrInternalKeyCorrupted(ikey []byte, reason string) error {
+ return errors.NewErrCorrupted(storage.FileDesc{}, &ErrInternalKeyCorrupted{append([]byte{}, ikey...), reason})
+}
+
+type keyType uint
+
+func (kt keyType) String() string {
+ switch kt {
+ case keyTypeDel:
+ return "d"
+ case keyTypeVal:
+ return "v"
+ }
+ return fmt.Sprintf("<invalid:%#x>", uint(kt))
+}
+
+// Value types encoded as the last component of internal keys.
+// Don't modify; these values are saved to disk.
+const (
+ keyTypeDel = keyType(0)
+ keyTypeVal = keyType(1)
+)
+
+// keyTypeSeek defines the keyType that should be passed when constructing an
+// internal key for seeking to a particular sequence number (since we
+// sort sequence numbers in decreasing order and the value type is
+// embedded as the low 8 bits in the sequence number in internal keys,
+// we need to use the highest-numbered ValueType, not the lowest).
+const keyTypeSeek = keyTypeVal
+
+const (
+ // Maximum value possible for a sequence number; the low 8 bits are
+ // used by the value type, so both can be packed together into a single
+ // 64-bit integer.
+ keyMaxSeq = (uint64(1) << 56) - 1
+ // Maximum value possible for packed sequence number and type.
+ keyMaxNum = (keyMaxSeq << 8) | uint64(keyTypeSeek)
+)
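+
+// Editor's note: a worked sketch of the packing described above, using only
+// identifiers defined in this file; illustrative only. The 8-byte trailer of
+// an internal key holds (seq << 8) | keyType, so for seq = 42 and keyTypeVal
+// the packed number is 0x2a01:
+//
+//	num := (uint64(42) << 8) | uint64(keyTypeVal) // == 0x2a01
+//	seq, kt := num>>8, keyType(num&0xff)          // round-trips to 42, keyTypeVal
+
+// Maximum number encoded in bytes.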
+var keyMaxNumBytes = make([]byte, 8)
+
+func init() {
+ binary.LittleEndian.PutUint64(keyMaxNumBytes, keyMaxNum)
+}
+
+type internalKey []byte
+
+func makeInternalKey(dst, ukey []byte, seq uint64, kt keyType) internalKey {
+ if seq > keyMaxSeq {
+ panic("leveldb: invalid sequence number")
+ } else if kt > keyTypeVal {
+ panic("leveldb: invalid type")
+ }
+
+ dst = ensureBuffer(dst, len(ukey)+8)
+ copy(dst, ukey)
+ binary.LittleEndian.PutUint64(dst[len(ukey):], (seq<<8)|uint64(kt))
+ return internalKey(dst)
+}
+
+func parseInternalKey(ik []byte) (ukey []byte, seq uint64, kt keyType, err error) {
+ if len(ik) < 8 {
+ return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid length")
+ }
+ num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
+ seq, kt = uint64(num>>8), keyType(num&0xff)
+ if kt > keyTypeVal {
+ return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid type")
+ }
+ ukey = ik[:len(ik)-8]
+ return
+}
+
+func validInternalKey(ik []byte) bool {
+ _, _, _, err := parseInternalKey(ik)
+ return err == nil
+}
+
+func (ik internalKey) assert() {
+ if ik == nil {
+ panic("leveldb: nil internalKey")
+ }
+ if len(ik) < 8 {
+ panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid length", []byte(ik), len(ik)))
+ }
+}
+
+func (ik internalKey) ukey() []byte {
+ ik.assert()
+ return ik[:len(ik)-8]
+}
+
+func (ik internalKey) num() uint64 {
+ ik.assert()
+ return binary.LittleEndian.Uint64(ik[len(ik)-8:])
+}
+
+func (ik internalKey) parseNum() (seq uint64, kt keyType) {
+ num := ik.num()
+ seq, kt = uint64(num>>8), keyType(num&0xff)
+ if kt > keyTypeVal {
+ panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt))
+ }
+ return
+}
+
+func (ik internalKey) String() string {
+ if ik == nil {
+ return "<nil>"
+ }
+
+ if ukey, seq, kt, err := parseInternalKey(ik); err == nil {
+ return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq)
+ }
+ return fmt.Sprintf("<invalid:%#x>", []byte(ik))
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
new file mode 100644
index 0000000..824e47f
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
@@ -0,0 +1,479 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package memdb provides an in-memory key/value database implementation.
+package memdb
+
+import (
+ "math/rand"
+ "sync"
+
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Common errors.
+var ( + ErrNotFound = errors.ErrNotFound + ErrIterReleased = errors.New("leveldb/memdb: iterator released") +) + +const tMaxHeight = 12 + +type dbIter struct { + util.BasicReleaser + p *DB + slice *util.Range + node int + forward bool + key, value []byte + err error +} + +func (i *dbIter) fill(checkStart, checkLimit bool) bool { + if i.node != 0 { + n := i.p.nodeData[i.node] + m := n + i.p.nodeData[i.node+nKey] + i.key = i.p.kvData[n:m] + if i.slice != nil { + switch { + case checkLimit && i.slice.Limit != nil && i.p.cmp.Compare(i.key, i.slice.Limit) >= 0: + fallthrough + case checkStart && i.slice.Start != nil && i.p.cmp.Compare(i.key, i.slice.Start) < 0: + i.node = 0 + goto bail + } + } + i.value = i.p.kvData[m : m+i.p.nodeData[i.node+nVal]] + return true + } +bail: + i.key = nil + i.value = nil + return false +} + +func (i *dbIter) Valid() bool { + return i.node != 0 +} + +func (i *dbIter) First() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + i.forward = true + i.p.mu.RLock() + defer i.p.mu.RUnlock() + if i.slice != nil && i.slice.Start != nil { + i.node, _ = i.p.findGE(i.slice.Start, false) + } else { + i.node = i.p.nodeData[nNext] + } + return i.fill(false, true) +} + +func (i *dbIter) Last() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + i.forward = false + i.p.mu.RLock() + defer i.p.mu.RUnlock() + if i.slice != nil && i.slice.Limit != nil { + i.node = i.p.findLT(i.slice.Limit) + } else { + i.node = i.p.findLast() + } + return i.fill(true, false) +} + +func (i *dbIter) Seek(key []byte) bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + i.forward = true + i.p.mu.RLock() + defer i.p.mu.RUnlock() + if i.slice != nil && i.slice.Start != nil && i.p.cmp.Compare(key, i.slice.Start) < 0 { + key = i.slice.Start + } + i.node, _ = i.p.findGE(key, false) + return i.fill(false, true) +} + +func (i *dbIter) Next() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + if i.node == 0 { + if !i.forward { + return i.First() + } + return false + } + i.forward = true + i.p.mu.RLock() + defer i.p.mu.RUnlock() + i.node = i.p.nodeData[i.node+nNext] + return i.fill(false, true) +} + +func (i *dbIter) Prev() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + if i.node == 0 { + if i.forward { + return i.Last() + } + return false + } + i.forward = false + i.p.mu.RLock() + defer i.p.mu.RUnlock() + i.node = i.p.findLT(i.key) + return i.fill(true, false) +} + +func (i *dbIter) Key() []byte { + return i.key +} + +func (i *dbIter) Value() []byte { + return i.value +} + +func (i *dbIter) Error() error { return i.err } + +func (i *dbIter) Release() { + if !i.Released() { + i.p = nil + i.node = 0 + i.key = nil + i.value = nil + i.BasicReleaser.Release() + } +} + +const ( + nKV = iota + nKey + nVal + nHeight + nNext +) + +// DB is an in-memory key/value database. +type DB struct { + cmp comparer.BasicComparer + rnd *rand.Rand + + mu sync.RWMutex + kvData []byte + // Node data: + // [0] : KV offset + // [1] : Key length + // [2] : Value length + // [3] : Height + // [3..height] : Next nodes + nodeData []int + prevNode [tMaxHeight]int + maxHeight int + n int + kvSize int +} + +func (p *DB) randHeight() (h int) { + const branching = 4 + h = 1 + for h < tMaxHeight && p.rnd.Int()%branching == 0 { + h++ + } + return +} + +// Must hold RW-lock if prev == true, as it use shared prevNode slice. 
+func (p *DB) findGE(key []byte, prev bool) (int, bool) { + node := 0 + h := p.maxHeight - 1 + for { + next := p.nodeData[node+nNext+h] + cmp := 1 + if next != 0 { + o := p.nodeData[next] + cmp = p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) + } + if cmp < 0 { + // Keep searching in this list + node = next + } else { + if prev { + p.prevNode[h] = node + } else if cmp == 0 { + return next, true + } + if h == 0 { + return next, cmp == 0 + } + h-- + } + } +} + +func (p *DB) findLT(key []byte) int { + node := 0 + h := p.maxHeight - 1 + for { + next := p.nodeData[node+nNext+h] + o := p.nodeData[next] + if next == 0 || p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) >= 0 { + if h == 0 { + break + } + h-- + } else { + node = next + } + } + return node +} + +func (p *DB) findLast() int { + node := 0 + h := p.maxHeight - 1 + for { + next := p.nodeData[node+nNext+h] + if next == 0 { + if h == 0 { + break + } + h-- + } else { + node = next + } + } + return node +} + +// Put sets the value for the given key. It overwrites any previous value +// for that key; a DB is not a multi-map. +// +// It is safe to modify the contents of the arguments after Put returns. +func (p *DB) Put(key []byte, value []byte) error { + p.mu.Lock() + defer p.mu.Unlock() + + if node, exact := p.findGE(key, true); exact { + kvOffset := len(p.kvData) + p.kvData = append(p.kvData, key...) + p.kvData = append(p.kvData, value...) + p.nodeData[node] = kvOffset + m := p.nodeData[node+nVal] + p.nodeData[node+nVal] = len(value) + p.kvSize += len(value) - m + return nil + } + + h := p.randHeight() + if h > p.maxHeight { + for i := p.maxHeight; i < h; i++ { + p.prevNode[i] = 0 + } + p.maxHeight = h + } + + kvOffset := len(p.kvData) + p.kvData = append(p.kvData, key...) + p.kvData = append(p.kvData, value...) + // Node + node := len(p.nodeData) + p.nodeData = append(p.nodeData, kvOffset, len(key), len(value), h) + for i, n := range p.prevNode[:h] { + m := n + nNext + i + p.nodeData = append(p.nodeData, p.nodeData[m]) + p.nodeData[m] = node + } + + p.kvSize += len(key) + len(value) + p.n++ + return nil +} + +// Delete deletes the value for the given key. It returns ErrNotFound if +// the DB does not contain the key. +// +// It is safe to modify the contents of the arguments after Delete returns. +func (p *DB) Delete(key []byte) error { + p.mu.Lock() + defer p.mu.Unlock() + + node, exact := p.findGE(key, true) + if !exact { + return ErrNotFound + } + + h := p.nodeData[node+nHeight] + for i, n := range p.prevNode[:h] { + m := n + nNext + i + p.nodeData[m] = p.nodeData[p.nodeData[m]+nNext+i] + } + + p.kvSize -= p.nodeData[node+nKey] + p.nodeData[node+nVal] + p.n-- + return nil +} + +// Contains returns true if the given key are in the DB. +// +// It is safe to modify the contents of the arguments after Contains returns. +func (p *DB) Contains(key []byte) bool { + p.mu.RLock() + _, exact := p.findGE(key, false) + p.mu.RUnlock() + return exact +} + +// Get gets the value for the given key. It returns error.ErrNotFound if the +// DB does not contain the key. +// +// The caller should not modify the contents of the returned slice, but +// it is safe to modify the contents of the argument after Get returns. 
+func (p *DB) Get(key []byte) (value []byte, err error) {
+ p.mu.RLock()
+ if node, exact := p.findGE(key, false); exact {
+ o := p.nodeData[node] + p.nodeData[node+nKey]
+ value = p.kvData[o : o+p.nodeData[node+nVal]]
+ } else {
+ err = ErrNotFound
+ }
+ p.mu.RUnlock()
+ return
+}
+
+// Find finds the key/value pair whose key is greater than or equal to the
+// given key. It returns ErrNotFound if the table doesn't contain
+// such a pair.
+//
+// The caller should not modify the contents of the returned slice, but
+// it is safe to modify the contents of the argument after Find returns.
+func (p *DB) Find(key []byte) (rkey, value []byte, err error) {
+ p.mu.RLock()
+ if node, _ := p.findGE(key, false); node != 0 {
+ n := p.nodeData[node]
+ m := n + p.nodeData[node+nKey]
+ rkey = p.kvData[n:m]
+ value = p.kvData[m : m+p.nodeData[node+nVal]]
+ } else {
+ err = ErrNotFound
+ }
+ p.mu.RUnlock()
+ return
+}
+
+// NewIterator returns an iterator of the DB.
+// The returned iterator is not safe for concurrent use, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with modifying its
+// underlying DB. However, the resultant key/value pairs are not guaranteed
+// to be a consistent snapshot of the DB at a particular point in time.
+//
+// Slice allows slicing the iterator to only contain keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB, and a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// WARNING: Any slice returned by the iterator (e.g. a slice returned by
+// calling the Iterator.Key() or Iterator.Value() methods) should not have
+// its contents modified unless noted otherwise.
+//
+// The iterator must be released after use, by calling the Release method.
+//
+// Also read the Iterator documentation of the leveldb/iterator package.
+func (p *DB) NewIterator(slice *util.Range) iterator.Iterator {
+ return &dbIter{p: p, slice: slice}
+}
+
+// Capacity returns the keys/values buffer capacity.
+func (p *DB) Capacity() int {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ return cap(p.kvData)
+}
+
+// Size returns the sum of key and value lengths. Deleted key/value pairs
+// are not accounted for, but they still consume buffer space, since the
+// buffer is append-only.
+func (p *DB) Size() int {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ return p.kvSize
+}
+
+// Free returns the free keys/values buffer space remaining before the
+// buffer needs to grow.
+func (p *DB) Free() int {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ return cap(p.kvData) - len(p.kvData)
+}
+
+// Len returns the number of entries in the DB.
+func (p *DB) Len() int {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ return p.n
+}
+
+// Reset resets the DB to its initial empty state, allowing the buffer to
+// be reused.
+func (p *DB) Reset() {
+ p.mu.Lock()
+ p.rnd = rand.New(rand.NewSource(0xdeadbeef))
+ p.maxHeight = 1
+ p.n = 0
+ p.kvSize = 0
+ p.kvData = p.kvData[:0]
+ p.nodeData = p.nodeData[:nNext+tMaxHeight]
+ p.nodeData[nKV] = 0
+ p.nodeData[nKey] = 0
+ p.nodeData[nVal] = 0
+ p.nodeData[nHeight] = tMaxHeight
+ for n := 0; n < tMaxHeight; n++ {
+ p.nodeData[nNext+n] = 0
+ p.prevNode[n] = 0
+ }
+ p.mu.Unlock()
+}
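+
+// Editor's note: a minimal usage sketch of this package's exported API;
+// illustrative only, not part of upstream:
+//
+//	db := memdb.New(comparer.DefaultComparer, 4096)
+//	_ = db.Put([]byte("foo"), []byte("bar"))
+//	v, _ := db.Get([]byte("foo")) // v == []byte("bar")
+//	it := db.NewIterator(nil)
+//	for it.Next() {
+//		// it.Key() and it.Value() visit entries in key order.
+//	}
+//	it.Release()
+
+// New creates a new initialized in-memory key/value DB. The capacity
+// is the initial key/value buffer capacity. The capacity is advisory,
+// not enforced.
+//
+// This DB is append-only: deleting an entry removes its node but does not
+// reclaim the KV buffer.
+//
+// The returned DB instance is safe for concurrent use.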
+func New(cmp comparer.BasicComparer, capacity int) *DB { + p := &DB{ + cmp: cmp, + rnd: rand.New(rand.NewSource(0xdeadbeef)), + maxHeight: 1, + kvData: make([]byte, 0, capacity), + nodeData: make([]int, 4+tMaxHeight), + } + p.nodeData[nHeight] = tMaxHeight + return p +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go new file mode 100644 index 0000000..528b164 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go @@ -0,0 +1,697 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package opt provides sets of options used by LevelDB. +package opt + +import ( + "math" + + "github.com/syndtr/goleveldb/leveldb/cache" + "github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/syndtr/goleveldb/leveldb/filter" +) + +const ( + KiB = 1024 + MiB = KiB * 1024 + GiB = MiB * 1024 +) + +var ( + DefaultBlockCacher = LRUCacher + DefaultBlockCacheCapacity = 8 * MiB + DefaultBlockRestartInterval = 16 + DefaultBlockSize = 4 * KiB + DefaultCompactionExpandLimitFactor = 25 + DefaultCompactionGPOverlapsFactor = 10 + DefaultCompactionL0Trigger = 4 + DefaultCompactionSourceLimitFactor = 1 + DefaultCompactionTableSize = 2 * MiB + DefaultCompactionTableSizeMultiplier = 1.0 + DefaultCompactionTotalSize = 10 * MiB + DefaultCompactionTotalSizeMultiplier = 10.0 + DefaultCompressionType = SnappyCompression + DefaultIteratorSamplingRate = 1 * MiB + DefaultOpenFilesCacher = LRUCacher + DefaultOpenFilesCacheCapacity = 500 + DefaultWriteBuffer = 4 * MiB + DefaultWriteL0PauseTrigger = 12 + DefaultWriteL0SlowdownTrigger = 8 +) + +// Cacher is a caching algorithm. +type Cacher interface { + New(capacity int) cache.Cacher +} + +type CacherFunc struct { + NewFunc func(capacity int) cache.Cacher +} + +func (f *CacherFunc) New(capacity int) cache.Cacher { + if f.NewFunc != nil { + return f.NewFunc(capacity) + } + return nil +} + +func noCacher(int) cache.Cacher { return nil } + +var ( + // LRUCacher is the LRU-cache algorithm. + LRUCacher = &CacherFunc{cache.NewLRU} + + // NoCacher is the value to disable caching algorithm. + NoCacher = &CacherFunc{} +) + +// Compression is the 'sorted table' block compression algorithm to use. +type Compression uint + +func (c Compression) String() string { + switch c { + case DefaultCompression: + return "default" + case NoCompression: + return "none" + case SnappyCompression: + return "snappy" + } + return "invalid" +} + +const ( + DefaultCompression Compression = iota + NoCompression + SnappyCompression + nCompression +) + +// Strict is the DB 'strict level'. +type Strict uint + +const ( + // If present then a corrupted or invalid chunk or block in manifest + // journal will cause an error instead of being dropped. + // This will prevent database with corrupted manifest to be opened. + StrictManifest Strict = 1 << iota + + // If present then journal chunk checksum will be verified. + StrictJournalChecksum + + // If present then a corrupted or invalid chunk or block in journal + // will cause an error instead of being dropped. + // This will prevent database with corrupted journal to be opened. + StrictJournal + + // If present then 'sorted table' block checksum will be verified. + // This has effect on both 'read operation' and compaction. + StrictBlockChecksum + + // If present then a corrupted 'sorted table' will fails compaction. 
+ // The database will enter read-only mode.
+ StrictCompaction
+
+ // If present then a corrupted 'sorted table' will halt 'read operation'.
+ StrictReader
+
+ // If present then leveldb.Recover will drop a corrupted 'sorted table'.
+ StrictRecovery
+
+ // This is only applicable to ReadOptions; if present then this ReadOptions
+ // 'strict level' will override the global one.
+ StrictOverride
+
+ // StrictAll enables all strict flags.
+ StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader | StrictRecovery
+
+ // DefaultStrict is the default set of strict flags. Specifying any strict
+ // flag will override the default strict flags as a whole (i.e. they are
+ // not OR'ed).
+ DefaultStrict = StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader
+
+ // NoStrict disables all strict flags, overriding the default strict flags.
+ NoStrict = ^StrictAll
+)
+
+// Options holds the optional parameters for the DB at large.
+type Options struct {
+ // AltFilters defines one or more 'alternative filters'.
+ // 'alternative filters' will be used during reads if a filter block
+ // does not match the 'effective filter'.
+ //
+ // The default value is nil.
+ AltFilters []filter.Filter
+
+ // BlockCacher provides the cache algorithm for LevelDB 'sorted table' block caching.
+ // Specify NoCacher to disable the caching algorithm.
+ //
+ // The default value is LRUCacher.
+ BlockCacher Cacher
+
+ // BlockCacheCapacity defines the capacity of the 'sorted table' block caching.
+ // Use -1 for zero; this has the same effect as specifying NoCacher for BlockCacher.
+ //
+ // The default value is 8MiB.
+ BlockCacheCapacity int
+
+ // BlockCacheEvictRemoved enables forced eviction of cached blocks belonging
+ // to a removed 'sorted table'.
+ //
+ // The default is false.
+ BlockCacheEvictRemoved bool
+
+ // BlockRestartInterval is the number of keys between restart points for
+ // delta encoding of keys.
+ //
+ // The default value is 16.
+ BlockRestartInterval int
+
+ // BlockSize is the minimum uncompressed size in bytes of each 'sorted table'
+ // block.
+ //
+ // The default value is 4KiB.
+ BlockSize int
+
+ // CompactionExpandLimitFactor limits the compaction size after expansion.
+ // This will be multiplied by the table size limit at the compaction target level.
+ //
+ // The default value is 25.
+ CompactionExpandLimitFactor int
+
+ // CompactionGPOverlapsFactor limits the overlaps in the grandparent level
+ // (Level + 2) that a single 'sorted table' generates.
+ // This will be multiplied by the table size limit at the grandparent level.
+ //
+ // The default value is 10.
+ CompactionGPOverlapsFactor int
+
+ // CompactionL0Trigger defines the number of 'sorted tables' at level-0 that
+ // will trigger compaction.
+ //
+ // The default value is 4.
+ CompactionL0Trigger int
+
+ // CompactionSourceLimitFactor limits the compaction source size. This doesn't
+ // apply to level-0.
+ // This will be multiplied by the table size limit at the compaction target level.
+ //
+ // The default value is 1.
+ CompactionSourceLimitFactor int
+
+ // CompactionTableSize limits the size of the 'sorted tables' that compaction generates.
+ // The limits for each level will be calculated as:
+ // CompactionTableSize * (CompactionTableSizeMultiplier ^ Level)
+ // The multiplier for each level can also be fine-tuned using
+ // CompactionTableSizeMultiplierPerLevel; see the worked sketch below.
+ //
+ // The default value is 2MiB.
+ CompactionTableSize int
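+
+ // Editor's note: a worked sketch of the formula above, using the defaults
+ // defined in this package; illustrative only, not part of upstream. With
+ // CompactionTableSize = 2 * MiB and CompactionTableSizeMultiplier = 1.0
+ // the limit is the same at every level:
+ //
+ //	limit := float64(2*MiB) * math.Pow(1.0, float64(level)) // 2 MiB at any level
+ //
+ // A multiplier of 2.0 would instead double the limit per level: 2 MiB at
+ // level 0, 4 MiB at level 1, 8 MiB at level 2, and so on.
+
+ // CompactionTableSizeMultiplier defines the multiplier for CompactionTableSize.
+ //
+ // The default value is 1.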
+ CompactionTableSizeMultiplier float64
+
+ // CompactionTableSizeMultiplierPerLevel defines a per-level multiplier for
+ // CompactionTableSize.
+ // Use zero to skip a level.
+ //
+ // The default value is nil.
+ CompactionTableSizeMultiplierPerLevel []float64
+
+ // CompactionTotalSize limits the total size of 'sorted tables' for each level.
+ // The limits for each level will be calculated as:
+ // CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level)
+ // The multiplier for each level can also be fine-tuned using
+ // CompactionTotalSizeMultiplierPerLevel.
+ //
+ // The default value is 10MiB.
+ CompactionTotalSize int
+
+ // CompactionTotalSizeMultiplier defines the multiplier for CompactionTotalSize.
+ //
+ // The default value is 10.
+ CompactionTotalSizeMultiplier float64
+
+ // CompactionTotalSizeMultiplierPerLevel defines a per-level multiplier for
+ // CompactionTotalSize.
+ // Use zero to skip a level.
+ //
+ // The default value is nil.
+ CompactionTotalSizeMultiplierPerLevel []float64
+
+ // Comparer defines a total ordering over the space of []byte keys: a 'less
+ // than' relationship. The same comparison algorithm must be used for reads
+ // and writes over the lifetime of the DB.
+ //
+ // The default value uses the same ordering as bytes.Compare.
+ Comparer comparer.Comparer
+
+ // Compression defines the 'sorted table' block compression to use.
+ //
+ // The default value (DefaultCompression) uses snappy compression.
+ Compression Compression
+
+ // DisableBufferPool disables use of the util.BufferPool functionality.
+ //
+ // The default value is false.
+ DisableBufferPool bool
+
+ // DisableBlockCache disables use of the cache.Cache functionality on
+ // 'sorted table' blocks.
+ //
+ // The default value is false.
+ DisableBlockCache bool
+
+ // DisableCompactionBackoff disables compaction retry backoff.
+ //
+ // The default value is false.
+ DisableCompactionBackoff bool
+
+ // DisableLargeBatchTransaction disables switching to transaction mode on
+ // large batch writes. When not disabled, batch writes larger than
+ // WriteBuffer will use a transaction.
+ //
+ // The default is false.
+ DisableLargeBatchTransaction bool
+
+ // ErrorIfExist defines whether an error should be returned if the DB
+ // already exists.
+ //
+ // The default value is false.
+ ErrorIfExist bool
+
+ // ErrorIfMissing defines whether an error should be returned if the DB is
+ // missing. If false then the database will be created if missing, otherwise
+ // an error will be returned.
+ //
+ // The default value is false.
+ ErrorIfMissing bool
+
+ // Filter defines an 'effective filter' to use. An 'effective filter',
+ // if defined, will be used to generate per-table filter blocks.
+ // The filter name will be stored on disk.
+ // During reads LevelDB will try to find a matching filter among the
+ // 'effective filter' and the 'alternative filters'.
+ //
+ // Filter can be changed after a DB has been created. It is recommended
+ // to put the old filter into the 'alternative filters' to mitigate the
+ // lack of a filter during the transition period.
+ //
+ // A filter is used to reduce disk reads when looking for a specific key.
+ //
+ // The default value is nil.
+ Filter filter.Filter
+
+ // IteratorSamplingRate defines the approximate gap (in bytes) between read
+ // samplings of an iterator. The samples will be used to determine when
+ // compaction should be triggered.
+ //
+ // The default is 1MiB.
+ IteratorSamplingRate int
+
+ // NoSync completely disables fsync.
+ //
+ // The default is false.
+ NoSync bool
+
+ // NoWriteMerge allows disabling write merge.
+ //
+ // The default is false.
+ NoWriteMerge bool
+
+ // OpenFilesCacher provides the cache algorithm for open-files caching.
+ // Specify NoCacher to disable the caching algorithm.
+ //
+ // The default value is LRUCacher.
+ OpenFilesCacher Cacher
+
+ // OpenFilesCacheCapacity defines the capacity of the open-files caching.
+ // Use -1 for zero; this has the same effect as specifying NoCacher for
+ // OpenFilesCacher.
+ //
+ // The default value is 500.
+ OpenFilesCacheCapacity int
+
+ // If true then the DB is opened in read-only mode.
+ //
+ // The default value is false.
+ ReadOnly bool
+
+ // Strict defines the DB strict level.
+ Strict Strict
+
+ // WriteBuffer defines the maximum size of a 'memdb' before it is flushed
+ // to a 'sorted table'. A 'memdb' is an in-memory DB backed by an on-disk
+ // unsorted journal.
+ //
+ // LevelDB may hold up to two 'memdb' at the same time.
+ //
+ // The default value is 4MiB.
+ WriteBuffer int
+
+ // WriteL0PauseTrigger defines the number of 'sorted tables' at level-0 that
+ // will pause writes.
+ //
+ // The default value is 12.
+ WriteL0PauseTrigger int
+
+ // WriteL0SlowdownTrigger defines the number of 'sorted tables' at level-0
+ // that will trigger write slowdown.
+ //
+ // The default value is 8.
+ WriteL0SlowdownTrigger int
+}
+
+func (o *Options) GetAltFilters() []filter.Filter {
+ if o == nil {
+ return nil
+ }
+ return o.AltFilters
+}
+
+func (o *Options) GetBlockCacher() Cacher {
+ if o == nil || o.BlockCacher == nil {
+ return DefaultBlockCacher
+ } else if o.BlockCacher == NoCacher {
+ return nil
+ }
+ return o.BlockCacher
+}
+
+func (o *Options) GetBlockCacheCapacity() int {
+ if o == nil || o.BlockCacheCapacity == 0 {
+ return DefaultBlockCacheCapacity
+ } else if o.BlockCacheCapacity < 0 {
+ return 0
+ }
+ return o.BlockCacheCapacity
+}
+
+func (o *Options) GetBlockCacheEvictRemoved() bool {
+ if o == nil {
+ return false
+ }
+ return o.BlockCacheEvictRemoved
+}
+
+func (o *Options) GetBlockRestartInterval() int {
+ if o == nil || o.BlockRestartInterval <= 0 {
+ return DefaultBlockRestartInterval
+ }
+ return o.BlockRestartInterval
+}
+
+func (o *Options) GetBlockSize() int {
+ if o == nil || o.BlockSize <= 0 {
+ return DefaultBlockSize
+ }
+ return o.BlockSize
+}
+
+func (o *Options) GetCompactionExpandLimit(level int) int {
+ factor := DefaultCompactionExpandLimitFactor
+ if o != nil && o.CompactionExpandLimitFactor > 0 {
+ factor = o.CompactionExpandLimitFactor
+ }
+ return o.GetCompactionTableSize(level+1) * factor
+}
+
+func (o *Options) GetCompactionGPOverlaps(level int) int {
+ factor := DefaultCompactionGPOverlapsFactor
+ if o != nil && o.CompactionGPOverlapsFactor > 0 {
+ factor = o.CompactionGPOverlapsFactor
+ }
+ return o.GetCompactionTableSize(level+2) * factor
+}
+
+func (o *Options) GetCompactionL0Trigger() int {
+ if o == nil || o.CompactionL0Trigger == 0 {
+ return DefaultCompactionL0Trigger
+ }
+ return o.CompactionL0Trigger
+}
+
+func (o *Options) GetCompactionSourceLimit(level int) int {
+ factor := DefaultCompactionSourceLimitFactor
+ if o != nil && o.CompactionSourceLimitFactor > 0 {
+ factor = o.CompactionSourceLimitFactor
+ }
+ return o.GetCompactionTableSize(level+1) * factor
+}
+
+func (o *Options) GetCompactionTableSize(level int) int {
+ var (
+ base = DefaultCompactionTableSize
+ mult float64
+ )
+ if o != nil {
+ if o.CompactionTableSize > 0 {
+ base = o.CompactionTableSize
+ }
+ if level < len(o.CompactionTableSizeMultiplierPerLevel) && o.CompactionTableSizeMultiplierPerLevel[level]
> 0 { + mult = o.CompactionTableSizeMultiplierPerLevel[level] + } else if o.CompactionTableSizeMultiplier > 0 { + mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level)) + } + } + if mult == 0 { + mult = math.Pow(DefaultCompactionTableSizeMultiplier, float64(level)) + } + return int(float64(base) * mult) +} + +func (o *Options) GetCompactionTotalSize(level int) int64 { + var ( + base = DefaultCompactionTotalSize + mult float64 + ) + if o != nil { + if o.CompactionTotalSize > 0 { + base = o.CompactionTotalSize + } + if level < len(o.CompactionTotalSizeMultiplierPerLevel) && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 { + mult = o.CompactionTotalSizeMultiplierPerLevel[level] + } else if o.CompactionTotalSizeMultiplier > 0 { + mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level)) + } + } + if mult == 0 { + mult = math.Pow(DefaultCompactionTotalSizeMultiplier, float64(level)) + } + return int64(float64(base) * mult) +} + +func (o *Options) GetComparer() comparer.Comparer { + if o == nil || o.Comparer == nil { + return comparer.DefaultComparer + } + return o.Comparer +} + +func (o *Options) GetCompression() Compression { + if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression { + return DefaultCompressionType + } + return o.Compression +} + +func (o *Options) GetDisableBufferPool() bool { + if o == nil { + return false + } + return o.DisableBufferPool +} + +func (o *Options) GetDisableBlockCache() bool { + if o == nil { + return false + } + return o.DisableBlockCache +} + +func (o *Options) GetDisableCompactionBackoff() bool { + if o == nil { + return false + } + return o.DisableCompactionBackoff +} + +func (o *Options) GetDisableLargeBatchTransaction() bool { + if o == nil { + return false + } + return o.DisableLargeBatchTransaction +} + +func (o *Options) GetErrorIfExist() bool { + if o == nil { + return false + } + return o.ErrorIfExist +} + +func (o *Options) GetErrorIfMissing() bool { + if o == nil { + return false + } + return o.ErrorIfMissing +} + +func (o *Options) GetFilter() filter.Filter { + if o == nil { + return nil + } + return o.Filter +} + +func (o *Options) GetIteratorSamplingRate() int { + if o == nil || o.IteratorSamplingRate <= 0 { + return DefaultIteratorSamplingRate + } + return o.IteratorSamplingRate +} + +func (o *Options) GetNoSync() bool { + if o == nil { + return false + } + return o.NoSync +} + +func (o *Options) GetNoWriteMerge() bool { + if o == nil { + return false + } + return o.NoWriteMerge +} + +func (o *Options) GetOpenFilesCacher() Cacher { + if o == nil || o.OpenFilesCacher == nil { + return DefaultOpenFilesCacher + } + if o.OpenFilesCacher == NoCacher { + return nil + } + return o.OpenFilesCacher +} + +func (o *Options) GetOpenFilesCacheCapacity() int { + if o == nil || o.OpenFilesCacheCapacity == 0 { + return DefaultOpenFilesCacheCapacity + } else if o.OpenFilesCacheCapacity < 0 { + return 0 + } + return o.OpenFilesCacheCapacity +} + +func (o *Options) GetReadOnly() bool { + if o == nil { + return false + } + return o.ReadOnly +} + +func (o *Options) GetStrict(strict Strict) bool { + if o == nil || o.Strict == 0 { + return DefaultStrict&strict != 0 + } + return o.Strict&strict != 0 +} + +func (o *Options) GetWriteBuffer() int { + if o == nil || o.WriteBuffer <= 0 { + return DefaultWriteBuffer + } + return o.WriteBuffer +} + +func (o *Options) GetWriteL0PauseTrigger() int { + if o == nil || o.WriteL0PauseTrigger == 0 { + return DefaultWriteL0PauseTrigger + } + return o.WriteL0PauseTrigger +} + +func 
(o *Options) GetWriteL0SlowdownTrigger() int { + if o == nil || o.WriteL0SlowdownTrigger == 0 { + return DefaultWriteL0SlowdownTrigger + } + return o.WriteL0SlowdownTrigger +} + +// ReadOptions holds the optional parameters for 'read operation'. The +// 'read operation' includes Get, Find and NewIterator. +type ReadOptions struct { + // DontFillCache defines whether block reads for this 'read operation' + // should be cached. If false then the block will be cached. This does + // not affects already cached block. + // + // The default value is false. + DontFillCache bool + + // Strict will be OR'ed with global DB 'strict level' unless StrictOverride + // is present. Currently only StrictReader that has effect here. + Strict Strict +} + +func (ro *ReadOptions) GetDontFillCache() bool { + if ro == nil { + return false + } + return ro.DontFillCache +} + +func (ro *ReadOptions) GetStrict(strict Strict) bool { + if ro == nil { + return false + } + return ro.Strict&strict != 0 +} + +// WriteOptions holds the optional parameters for 'write operation'. The +// 'write operation' includes Write, Put and Delete. +type WriteOptions struct { + // NoWriteMerge allows disabling write merge. + // + // The default is false. + NoWriteMerge bool + + // Sync is whether to sync underlying writes from the OS buffer cache + // through to actual disk, if applicable. Setting Sync can result in + // slower writes. + // + // If false, and the machine crashes, then some recent writes may be lost. + // Note that if it is just the process that crashes (and the machine does + // not) then no writes will be lost. + // + // In other words, Sync being false has the same semantics as a write + // system call. Sync being true means write followed by fsync. + // + // The default value is false. + Sync bool +} + +func (wo *WriteOptions) GetNoWriteMerge() bool { + if wo == nil { + return false + } + return wo.NoWriteMerge +} + +func (wo *WriteOptions) GetSync() bool { + if wo == nil { + return false + } + return wo.Sync +} + +func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool { + if ro.GetStrict(StrictOverride) { + return ro.GetStrict(strict) + } else { + return o.GetStrict(strict) || ro.GetStrict(strict) + } +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/options.go b/vendor/github.com/syndtr/goleveldb/leveldb/options.go new file mode 100644 index 0000000..b072b1a --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/options.go @@ -0,0 +1,107 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "github.com/syndtr/goleveldb/leveldb/filter" + "github.com/syndtr/goleveldb/leveldb/opt" +) + +func dupOptions(o *opt.Options) *opt.Options { + newo := &opt.Options{} + if o != nil { + *newo = *o + } + if newo.Strict == 0 { + newo.Strict = opt.DefaultStrict + } + return newo +} + +func (s *session) setOptions(o *opt.Options) { + no := dupOptions(o) + // Alternative filters. + if filters := o.GetAltFilters(); len(filters) > 0 { + no.AltFilters = make([]filter.Filter, len(filters)) + for i, filter := range filters { + no.AltFilters[i] = &iFilter{filter} + } + } + // Comparer. + s.icmp = &iComparer{o.GetComparer()} + no.Comparer = s.icmp + // Filter. 
+ if filter := o.GetFilter(); filter != nil { + no.Filter = &iFilter{filter} + } + + s.o = &cachedOptions{Options: no} + s.o.cache() +} + +const optCachedLevel = 7 + +type cachedOptions struct { + *opt.Options + + compactionExpandLimit []int + compactionGPOverlaps []int + compactionSourceLimit []int + compactionTableSize []int + compactionTotalSize []int64 +} + +func (co *cachedOptions) cache() { + co.compactionExpandLimit = make([]int, optCachedLevel) + co.compactionGPOverlaps = make([]int, optCachedLevel) + co.compactionSourceLimit = make([]int, optCachedLevel) + co.compactionTableSize = make([]int, optCachedLevel) + co.compactionTotalSize = make([]int64, optCachedLevel) + + for level := 0; level < optCachedLevel; level++ { + co.compactionExpandLimit[level] = co.Options.GetCompactionExpandLimit(level) + co.compactionGPOverlaps[level] = co.Options.GetCompactionGPOverlaps(level) + co.compactionSourceLimit[level] = co.Options.GetCompactionSourceLimit(level) + co.compactionTableSize[level] = co.Options.GetCompactionTableSize(level) + co.compactionTotalSize[level] = co.Options.GetCompactionTotalSize(level) + } +} + +func (co *cachedOptions) GetCompactionExpandLimit(level int) int { + if level < optCachedLevel { + return co.compactionExpandLimit[level] + } + return co.Options.GetCompactionExpandLimit(level) +} + +func (co *cachedOptions) GetCompactionGPOverlaps(level int) int { + if level < optCachedLevel { + return co.compactionGPOverlaps[level] + } + return co.Options.GetCompactionGPOverlaps(level) +} + +func (co *cachedOptions) GetCompactionSourceLimit(level int) int { + if level < optCachedLevel { + return co.compactionSourceLimit[level] + } + return co.Options.GetCompactionSourceLimit(level) +} + +func (co *cachedOptions) GetCompactionTableSize(level int) int { + if level < optCachedLevel { + return co.compactionTableSize[level] + } + return co.Options.GetCompactionTableSize(level) +} + +func (co *cachedOptions) GetCompactionTotalSize(level int) int64 { + if level < optCachedLevel { + return co.compactionTotalSize[level] + } + return co.Options.GetCompactionTotalSize(level) +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session.go b/vendor/github.com/syndtr/goleveldb/leveldb/session.go new file mode 100644 index 0000000..3f391f9 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/session.go @@ -0,0 +1,210 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "fmt" + "io" + "os" + "sync" + + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/journal" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" +) + +// ErrManifestCorrupted records manifest corruption. This error will be +// wrapped with errors.ErrCorrupted. +type ErrManifestCorrupted struct { + Field string + Reason string +} + +func (e *ErrManifestCorrupted) Error() string { + return fmt.Sprintf("leveldb: manifest corrupted (field '%s'): %s", e.Field, e.Reason) +} + +func newErrManifestCorrupted(fd storage.FileDesc, field, reason string) error { + return errors.NewErrCorrupted(fd, &ErrManifestCorrupted{field, reason}) +} + +// session represent a persistent database session. +type session struct { + // Need 64-bit alignment. 
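+	// The fields below are updated with sync/atomic; on 32-bit platforms
+	// atomic 64-bit operations require 64-bit alignment, which Go only
+	// guarantees for the first word of an allocated struct.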
+ stNextFileNum int64 // current unused file number + stJournalNum int64 // current journal file number; need external synchronization + stPrevJournalNum int64 // prev journal file number; no longer used; for compatibility with older version of leveldb + stTempFileNum int64 + stSeqNum uint64 // last mem compacted seq; need external synchronization + + stor *iStorage + storLock storage.Locker + o *cachedOptions + icmp *iComparer + tops *tOps + fileRef map[int64]int + + manifest *journal.Writer + manifestWriter storage.Writer + manifestFd storage.FileDesc + + stCompPtrs []internalKey // compaction pointers; need external synchronization + stVersion *version // current version + vmu sync.Mutex +} + +// Creates new initialized session instance. +func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) { + if stor == nil { + return nil, os.ErrInvalid + } + storLock, err := stor.Lock() + if err != nil { + return + } + s = &session{ + stor: newIStorage(stor), + storLock: storLock, + fileRef: make(map[int64]int), + } + s.setOptions(o) + s.tops = newTableOps(s) + s.setVersion(newVersion(s)) + s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed") + return +} + +// Close session. +func (s *session) close() { + s.tops.close() + if s.manifest != nil { + s.manifest.Close() + } + if s.manifestWriter != nil { + s.manifestWriter.Close() + } + s.manifest = nil + s.manifestWriter = nil + s.setVersion(&version{s: s, closing: true}) +} + +// Release session lock. +func (s *session) release() { + s.storLock.Unlock() +} + +// Create a new database session; need external synchronization. +func (s *session) create() error { + // create manifest + return s.newManifest(nil, nil) +} + +// Recover a database session; need external synchronization. +func (s *session) recover() (err error) { + defer func() { + if os.IsNotExist(err) { + // Don't return os.ErrNotExist if the underlying storage contains + // other files that belong to LevelDB. So the DB won't get trashed. + if fds, _ := s.stor.List(storage.TypeAll); len(fds) > 0 { + err = &errors.ErrCorrupted{Fd: storage.FileDesc{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}} + } + } + }() + + fd, err := s.stor.GetMeta() + if err != nil { + return + } + + reader, err := s.stor.Open(fd) + if err != nil { + return + } + defer reader.Close() + + var ( + // Options. 
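+		// With StrictManifest set, any corrupted manifest record below
+		// aborts recovery; otherwise corrupted records are logged and
+		// skipped.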
+ strict = s.o.GetStrict(opt.StrictManifest) + + jr = journal.NewReader(reader, dropper{s, fd}, strict, true) + rec = &sessionRecord{} + staging = s.stVersion.newStaging() + ) + for { + var r io.Reader + r, err = jr.Next() + if err != nil { + if err == io.EOF { + err = nil + break + } + return errors.SetFd(err, fd) + } + + err = rec.decode(r) + if err == nil { + // save compact pointers + for _, r := range rec.compPtrs { + s.setCompPtr(r.level, internalKey(r.ikey)) + } + // commit record to version staging + staging.commit(rec) + } else { + err = errors.SetFd(err, fd) + if strict || !errors.IsCorrupted(err) { + return + } + s.logf("manifest error: %v (skipped)", errors.SetFd(err, fd)) + } + rec.resetCompPtrs() + rec.resetAddedTables() + rec.resetDeletedTables() + } + + switch { + case !rec.has(recComparer): + return newErrManifestCorrupted(fd, "comparer", "missing") + case rec.comparer != s.icmp.uName(): + return newErrManifestCorrupted(fd, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer)) + case !rec.has(recNextFileNum): + return newErrManifestCorrupted(fd, "next-file-num", "missing") + case !rec.has(recJournalNum): + return newErrManifestCorrupted(fd, "journal-file-num", "missing") + case !rec.has(recSeqNum): + return newErrManifestCorrupted(fd, "seq-num", "missing") + } + + s.manifestFd = fd + s.setVersion(staging.finish()) + s.setNextFileNum(rec.nextFileNum) + s.recordCommited(rec) + return nil +} + +// Commit session; need external synchronization. +func (s *session) commit(r *sessionRecord) (err error) { + v := s.version() + defer v.release() + + // spawn new version based on current version + nv := v.spawn(r) + + if s.manifest == nil { + // manifest journal writer not yet created, create one + err = s.newManifest(r, nv) + } else { + err = s.flushManifest(r) + } + + // finally, apply new version if no error rise + if err == nil { + s.setVersion(nv) + } + + return +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go new file mode 100644 index 0000000..089cd00 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go @@ -0,0 +1,302 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "sync/atomic" + + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/memdb" + "github.com/syndtr/goleveldb/leveldb/opt" +) + +func (s *session) pickMemdbLevel(umin, umax []byte, maxLevel int) int { + v := s.version() + defer v.release() + return v.pickMemdbLevel(umin, umax, maxLevel) +} + +func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, maxLevel int) (int, error) { + // Create sorted table. + iter := mdb.NewIterator(nil) + defer iter.Release() + t, n, err := s.tops.createFrom(iter) + if err != nil { + return 0, err + } + + // Pick level other than zero can cause compaction issue with large + // bulk insert and delete on strictly incrementing key-space. The + // problem is that the small deletion markers trapped at lower level, + // while key/value entries keep growing at higher level. Since the + // key-space is strictly incrementing it will not overlaps with + // higher level, thus maximum possible level is always picked, while + // overlapping deletion marker pushed into lower level. 
+ // See: https://github.com/syndtr/goleveldb/issues/127. + flushLevel := s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey(), maxLevel) + rec.addTableFile(flushLevel, t) + + s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", flushLevel, t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax) + return flushLevel, nil +} + +// Pick a compaction based on current state; need external synchronization. +func (s *session) pickCompaction() *compaction { + v := s.version() + + var sourceLevel int + var t0 tFiles + if v.cScore >= 1 { + sourceLevel = v.cLevel + cptr := s.getCompPtr(sourceLevel) + tables := v.levels[sourceLevel] + for _, t := range tables { + if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 { + t0 = append(t0, t) + break + } + } + if len(t0) == 0 { + t0 = append(t0, tables[0]) + } + } else { + if p := atomic.LoadPointer(&v.cSeek); p != nil { + ts := (*tSet)(p) + sourceLevel = ts.level + t0 = append(t0, ts.table) + } else { + v.release() + return nil + } + } + + return newCompaction(s, v, sourceLevel, t0) +} + +// Create compaction from given level and range; need external synchronization. +func (s *session) getCompactionRange(sourceLevel int, umin, umax []byte, noLimit bool) *compaction { + v := s.version() + + if sourceLevel >= len(v.levels) { + v.release() + return nil + } + + t0 := v.levels[sourceLevel].getOverlaps(nil, s.icmp, umin, umax, sourceLevel == 0) + if len(t0) == 0 { + v.release() + return nil + } + + // Avoid compacting too much in one shot in case the range is large. + // But we cannot do this for level-0 since level-0 files can overlap + // and we must not pick one file and drop another older file if the + // two files overlap. + if !noLimit && sourceLevel > 0 { + limit := int64(v.s.o.GetCompactionSourceLimit(sourceLevel)) + total := int64(0) + for i, t := range t0 { + total += t.size + if total >= limit { + s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1) + t0 = t0[:i+1] + break + } + } + } + + return newCompaction(s, v, sourceLevel, t0) +} + +func newCompaction(s *session, v *version, sourceLevel int, t0 tFiles) *compaction { + c := &compaction{ + s: s, + v: v, + sourceLevel: sourceLevel, + levels: [2]tFiles{t0, nil}, + maxGPOverlaps: int64(s.o.GetCompactionGPOverlaps(sourceLevel)), + tPtrs: make([]int, len(v.levels)), + } + c.expand() + c.save() + return c +} + +// compaction represent a compaction state. +type compaction struct { + s *session + v *version + + sourceLevel int + levels [2]tFiles + maxGPOverlaps int64 + + gp tFiles + gpi int + seenKey bool + gpOverlappedBytes int64 + imin, imax internalKey + tPtrs []int + released bool + + snapGPI int + snapSeenKey bool + snapGPOverlappedBytes int64 + snapTPtrs []int +} + +func (c *compaction) save() { + c.snapGPI = c.gpi + c.snapSeenKey = c.seenKey + c.snapGPOverlappedBytes = c.gpOverlappedBytes + c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...) +} + +func (c *compaction) restore() { + c.gpi = c.snapGPI + c.seenKey = c.snapSeenKey + c.gpOverlappedBytes = c.snapGPOverlappedBytes + c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...) +} + +func (c *compaction) release() { + if !c.released { + c.released = true + c.v.release() + } +} + +// Expand compacted tables; need external synchronization. 
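+// An expansion is kept only when it leaves the already-picked
+// sourceLevel+1 tables unchanged and stays under the expand limit, so
+// extra sourceLevel input is folded in without widening the output range.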
+func (c *compaction) expand() { + limit := int64(c.s.o.GetCompactionExpandLimit(c.sourceLevel)) + vt0 := c.v.levels[c.sourceLevel] + vt1 := tFiles{} + if level := c.sourceLevel + 1; level < len(c.v.levels) { + vt1 = c.v.levels[level] + } + + t0, t1 := c.levels[0], c.levels[1] + imin, imax := t0.getRange(c.s.icmp) + // We expand t0 here just incase ukey hop across tables. + t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.sourceLevel == 0) + if len(t0) != len(c.levels[0]) { + imin, imax = t0.getRange(c.s.icmp) + } + t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false) + // Get entire range covered by compaction. + amin, amax := append(t0, t1...).getRange(c.s.icmp) + + // See if we can grow the number of inputs in "sourceLevel" without + // changing the number of "sourceLevel+1" files we pick up. + if len(t1) > 0 { + exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.sourceLevel == 0) + if len(exp0) > len(t0) && t1.size()+exp0.size() < limit { + xmin, xmax := exp0.getRange(c.s.icmp) + exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false) + if len(exp1) == len(t1) { + c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)", + c.sourceLevel, c.sourceLevel+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())), + len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size()))) + imin, imax = xmin, xmax + t0, t1 = exp0, exp1 + amin, amax = append(t0, t1...).getRange(c.s.icmp) + } + } + } + + // Compute the set of grandparent files that overlap this compaction + // (parent == sourceLevel+1; grandparent == sourceLevel+2) + if level := c.sourceLevel + 2; level < len(c.v.levels) { + c.gp = c.v.levels[level].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false) + } + + c.levels[0], c.levels[1] = t0, t1 + c.imin, c.imax = imin, imax +} + +// Check whether compaction is trivial. +func (c *compaction) trivial() bool { + return len(c.levels[0]) == 1 && len(c.levels[1]) == 0 && c.gp.size() <= c.maxGPOverlaps +} + +func (c *compaction) baseLevelForKey(ukey []byte) bool { + for level := c.sourceLevel + 2; level < len(c.v.levels); level++ { + tables := c.v.levels[level] + for c.tPtrs[level] < len(tables) { + t := tables[c.tPtrs[level]] + if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 { + // We've advanced far enough. + if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { + // Key falls in this file's range, so definitely not base level. + return false + } + break + } + c.tPtrs[level]++ + } + } + return true +} + +func (c *compaction) shouldStopBefore(ikey internalKey) bool { + for ; c.gpi < len(c.gp); c.gpi++ { + gp := c.gp[c.gpi] + if c.s.icmp.Compare(ikey, gp.imax) <= 0 { + break + } + if c.seenKey { + c.gpOverlappedBytes += gp.size + } + } + c.seenKey = true + + if c.gpOverlappedBytes > c.maxGPOverlaps { + // Too much overlap for current output; start new output. + c.gpOverlappedBytes = 0 + return true + } + return false +} + +// Creates an iterator. +func (c *compaction) newIterator() iterator.Iterator { + // Creates iterator slice. + icap := len(c.levels) + if c.sourceLevel == 0 { + // Special case for level-0. + icap = len(c.levels[0]) + 1 + } + its := make([]iterator.Iterator, 0, icap) + + // Options. 
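+	// Compaction reads every input table exactly once, so those blocks
+	// are deliberately kept out of the block cache (DontFillCache) to
+	// avoid evicting blocks that serve live reads.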
+	ro := &opt.ReadOptions{
+		DontFillCache: true,
+		Strict:        opt.StrictOverride,
+	}
+	strict := c.s.o.GetStrict(opt.StrictCompaction)
+	if strict {
+		ro.Strict |= opt.StrictReader
+	}
+
+	for i, tables := range c.levels {
+		if len(tables) == 0 {
+			continue
+		}
+
+		// Level-0 tables are not sorted and may overlap each other.
+		if c.sourceLevel+i == 0 {
+			for _, t := range tables {
+				its = append(its, c.s.tops.newIterator(t, nil, ro))
+			}
+		} else {
+			it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
+			its = append(its, it)
+		}
+	}
+
+	return iterator.NewMergedIterator(its, c.s.icmp, strict)
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go
new file mode 100644
index 0000000..854e1aa
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go
@@ -0,0 +1,323 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"bufio"
+	"encoding/binary"
+	"io"
+	"strings"
+
+	"github.com/syndtr/goleveldb/leveldb/errors"
+	"github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+type byteReader interface {
+	io.Reader
+	io.ByteReader
+}
+
+// These numbers are written to disk and should not be changed.
+const (
+	recComparer    = 1
+	recJournalNum  = 2
+	recNextFileNum = 3
+	recSeqNum      = 4
+	recCompPtr     = 5
+	recDelTable    = 6
+	recAddTable    = 7
+	// 8 was used for large value refs
+	recPrevJournalNum = 9
+)
+
+type cpRecord struct {
+	level int
+	ikey  internalKey
+}
+
+type atRecord struct {
+	level int
+	num   int64
+	size  int64
+	imin  internalKey
+	imax  internalKey
+}
+
+type dtRecord struct {
+	level int
+	num   int64
+}
+
+type sessionRecord struct {
+	hasRec         int
+	comparer       string
+	journalNum     int64
+	prevJournalNum int64
+	nextFileNum    int64
+	seqNum         uint64
+	compPtrs       []cpRecord
+	addedTables    []atRecord
+	deletedTables  []dtRecord
+
+	scratch [binary.MaxVarintLen64]byte
+	err     error
+}
+
+func (p *sessionRecord) has(rec int) bool {
+	return p.hasRec&(1<<uint(rec)) != 0
+}
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"fmt"
+	"sync/atomic"
+
+	"github.com/syndtr/goleveldb/leveldb/journal"
+	"github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+// Logging.
+
+type dropper struct {
+	s  *session
+	fd storage.FileDesc
+}
+
+func (d dropper) Drop(err error) {
+	if e, ok := err.(*journal.ErrCorrupted); ok {
+		d.s.logf("journal@drop %s-%d S·%s %q", d.fd.Type, d.fd.Num, shortenb(e.Size), e.Reason)
+	} else {
+		d.s.logf("journal@drop %s-%d %q", d.fd.Type, d.fd.Num, err)
+	}
+}
+
+func (s *session) log(v ...interface{})                 { s.stor.Log(fmt.Sprint(v...)) }
+func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf(format, v...)) }
+
+// File utils.
+
+func (s *session) newTemp() storage.FileDesc {
+	num := atomic.AddInt64(&s.stTempFileNum, 1) - 1
+	return storage.FileDesc{Type: storage.TypeTemp, Num: num}
+}
+
+func (s *session) addFileRef(fd storage.FileDesc, ref int) int {
+	ref += s.fileRef[fd.Num]
+	if ref > 0 {
+		s.fileRef[fd.Num] = ref
+	} else if ref == 0 {
+		delete(s.fileRef, fd.Num)
+	} else {
+		panic(fmt.Sprintf("negative ref: %v", fd))
+	}
+	return ref
+}
+
+// Session state.
+
+// Get current version. This increments the version ref-count; the caller
+// must call version.release (exactly once) after use.
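+// Versions are reference-counted: the table files a version references
+// are only deleted once no live version (iterator, snapshot) holds them.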
+func (s *session) version() *version { + s.vmu.Lock() + defer s.vmu.Unlock() + s.stVersion.incref() + return s.stVersion +} + +func (s *session) tLen(level int) int { + s.vmu.Lock() + defer s.vmu.Unlock() + return s.stVersion.tLen(level) +} + +// Set current version to v. +func (s *session) setVersion(v *version) { + s.vmu.Lock() + defer s.vmu.Unlock() + // Hold by session. It is important to call this first before releasing + // current version, otherwise the still used files might get released. + v.incref() + if s.stVersion != nil { + // Release current version. + s.stVersion.releaseNB() + } + s.stVersion = v +} + +// Get current unused file number. +func (s *session) nextFileNum() int64 { + return atomic.LoadInt64(&s.stNextFileNum) +} + +// Set current unused file number to num. +func (s *session) setNextFileNum(num int64) { + atomic.StoreInt64(&s.stNextFileNum, num) +} + +// Mark file number as used. +func (s *session) markFileNum(num int64) { + nextFileNum := num + 1 + for { + old, x := s.stNextFileNum, nextFileNum + if old > x { + x = old + } + if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) { + break + } + } +} + +// Allocate a file number. +func (s *session) allocFileNum() int64 { + return atomic.AddInt64(&s.stNextFileNum, 1) - 1 +} + +// Reuse given file number. +func (s *session) reuseFileNum(num int64) { + for { + old, x := s.stNextFileNum, num + if old != x+1 { + x = old + } + if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) { + break + } + } +} + +// Set compaction ptr at given level; need external synchronization. +func (s *session) setCompPtr(level int, ik internalKey) { + if level >= len(s.stCompPtrs) { + newCompPtrs := make([]internalKey, level+1) + copy(newCompPtrs, s.stCompPtrs) + s.stCompPtrs = newCompPtrs + } + s.stCompPtrs[level] = append(internalKey{}, ik...) +} + +// Get compaction ptr at given level; need external synchronization. +func (s *session) getCompPtr(level int) internalKey { + if level >= len(s.stCompPtrs) { + return nil + } + return s.stCompPtrs[level] +} + +// Manifest related utils. + +// Fill given session record obj with current states; need external +// synchronization. +func (s *session) fillRecord(r *sessionRecord, snapshot bool) { + r.setNextFileNum(s.nextFileNum()) + + if snapshot { + if !r.has(recJournalNum) { + r.setJournalNum(s.stJournalNum) + } + + if !r.has(recSeqNum) { + r.setSeqNum(s.stSeqNum) + } + + for level, ik := range s.stCompPtrs { + if ik != nil { + r.addCompPtr(level, ik) + } + } + + r.setComparer(s.icmp.uName()) + } +} + +// Mark if record has been committed, this will update session state; +// need external synchronization. +func (s *session) recordCommited(rec *sessionRecord) { + if rec.has(recJournalNum) { + s.stJournalNum = rec.journalNum + } + + if rec.has(recPrevJournalNum) { + s.stPrevJournalNum = rec.prevJournalNum + } + + if rec.has(recSeqNum) { + s.stSeqNum = rec.seqNum + } + + for _, r := range rec.compPtrs { + s.setCompPtr(r.level, internalKey(r.ikey)) + } +} + +// Create a new manifest file; need external synchronization. 
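+// A full snapshot record is written first; only after SetMeta succeeds
+// does the new file replace the old manifest, otherwise it is removed
+// and its file number reused.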
+func (s *session) newManifest(rec *sessionRecord, v *version) (err error) { + fd := storage.FileDesc{Type: storage.TypeManifest, Num: s.allocFileNum()} + writer, err := s.stor.Create(fd) + if err != nil { + return + } + jw := journal.NewWriter(writer) + + if v == nil { + v = s.version() + defer v.release() + } + if rec == nil { + rec = &sessionRecord{} + } + s.fillRecord(rec, true) + v.fillRecord(rec) + + defer func() { + if err == nil { + s.recordCommited(rec) + if s.manifest != nil { + s.manifest.Close() + } + if s.manifestWriter != nil { + s.manifestWriter.Close() + } + if !s.manifestFd.Zero() { + s.stor.Remove(s.manifestFd) + } + s.manifestFd = fd + s.manifestWriter = writer + s.manifest = jw + } else { + writer.Close() + s.stor.Remove(fd) + s.reuseFileNum(fd.Num) + } + }() + + w, err := jw.Next() + if err != nil { + return + } + err = rec.encode(w) + if err != nil { + return + } + err = jw.Flush() + if err != nil { + return + } + err = s.stor.SetMeta(fd) + return +} + +// Flush record to disk. +func (s *session) flushManifest(rec *sessionRecord) (err error) { + s.fillRecord(rec, false) + w, err := s.manifest.Next() + if err != nil { + return + } + err = rec.encode(w) + if err != nil { + return + } + err = s.manifest.Flush() + if err != nil { + return + } + if !s.o.GetNoSync() { + err = s.manifestWriter.Sync() + if err != nil { + return + } + } + s.recordCommited(rec) + return +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage.go new file mode 100644 index 0000000..d45fb5d --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage.go @@ -0,0 +1,63 @@ +package leveldb + +import ( + "github.com/syndtr/goleveldb/leveldb/storage" + "sync/atomic" +) + +type iStorage struct { + storage.Storage + read uint64 + write uint64 +} + +func (c *iStorage) Open(fd storage.FileDesc) (storage.Reader, error) { + r, err := c.Storage.Open(fd) + return &iStorageReader{r, c}, err +} + +func (c *iStorage) Create(fd storage.FileDesc) (storage.Writer, error) { + w, err := c.Storage.Create(fd) + return &iStorageWriter{w, c}, err +} + +func (c *iStorage) reads() uint64 { + return atomic.LoadUint64(&c.read) +} + +func (c *iStorage) writes() uint64 { + return atomic.LoadUint64(&c.write) +} + +// newIStorage returns the given storage wrapped by iStorage. +func newIStorage(s storage.Storage) *iStorage { + return &iStorage{s, 0, 0} +} + +type iStorageReader struct { + storage.Reader + c *iStorage +} + +func (r *iStorageReader) Read(p []byte) (n int, err error) { + n, err = r.Reader.Read(p) + atomic.AddUint64(&r.c.read, uint64(n)) + return n, err +} + +func (r *iStorageReader) ReadAt(p []byte, off int64) (n int, err error) { + n, err = r.Reader.ReadAt(p, off) + atomic.AddUint64(&r.c.read, uint64(n)) + return n, err +} + +type iStorageWriter struct { + storage.Writer + c *iStorage +} + +func (w *iStorageWriter) Write(p []byte) (n int, err error) { + n, err = w.Writer.Write(p) + atomic.AddUint64(&w.c.write, uint64(n)) + return n, err +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go new file mode 100644 index 0000000..9ba71fd --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go @@ -0,0 +1,671 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reservefs. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
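The iStorage wrapper above (leveldb/storage.go) is a small decorator: it intercepts every Reader and Writer the wrapped Storage hands out and accumulates byte counts with sync/atomic. A minimal, self-contained sketch of the same pattern follows; the CountingReader type and its names are illustrative, not part of goleveldb, and io.Discard assumes Go 1.16+.

package main

import (
	"fmt"
	"io"
	"strings"
	"sync/atomic"
)

// CountingReader wraps an io.Reader and atomically accumulates the number
// of bytes read, the same way iStorage wraps storage.Reader above.
type CountingReader struct {
	r io.Reader
	n uint64
}

func (c *CountingReader) Read(p []byte) (int, error) {
	n, err := c.r.Read(p)
	atomic.AddUint64(&c.n, uint64(n)) // safe even with concurrent readers
	return n, err
}

// Bytes reports the total number of bytes read so far.
func (c *CountingReader) Bytes() uint64 { return atomic.LoadUint64(&c.n) }

func main() {
	cr := &CountingReader{r: strings.NewReader("some table block")}
	io.Copy(io.Discard, cr)
	fmt.Println(cr.Bytes()) // 16
}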
+ +package storage + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" +) + +var ( + errFileOpen = errors.New("leveldb/storage: file still open") + errReadOnly = errors.New("leveldb/storage: storage is read-only") +) + +type fileLock interface { + release() error +} + +type fileStorageLock struct { + fs *fileStorage +} + +func (lock *fileStorageLock) Unlock() { + if lock.fs != nil { + lock.fs.mu.Lock() + defer lock.fs.mu.Unlock() + if lock.fs.slock == lock { + lock.fs.slock = nil + } + } +} + +type int64Slice []int64 + +func (p int64Slice) Len() int { return len(p) } +func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func writeFileSynced(filename string, data []byte, perm os.FileMode) error { + f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Sync(); err == nil { + err = err1 + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +const logSizeThreshold = 1024 * 1024 // 1 MiB + +// fileStorage is a file-system backed storage. +type fileStorage struct { + path string + readOnly bool + + mu sync.Mutex + flock fileLock + slock *fileStorageLock + logw *os.File + logSize int64 + buf []byte + // Opened file counter; if open < 0 means closed. + open int + day int +} + +// OpenFile returns a new filesystem-backed storage implementation with the given +// path. This also acquire a file lock, so any subsequent attempt to open the +// same path will fail. +// +// The storage must be closed after use, by calling Close method. +func OpenFile(path string, readOnly bool) (Storage, error) { + if fi, err := os.Stat(path); err == nil { + if !fi.IsDir() { + return nil, fmt.Errorf("leveldb/storage: open %s: not a directory", path) + } + } else if os.IsNotExist(err) && !readOnly { + if err := os.MkdirAll(path, 0755); err != nil { + return nil, err + } + } else { + return nil, err + } + + flock, err := newFileLock(filepath.Join(path, "LOCK"), readOnly) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + flock.release() + } + }() + + var ( + logw *os.File + logSize int64 + ) + if !readOnly { + logw, err = os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + return nil, err + } + logSize, err = logw.Seek(0, os.SEEK_END) + if err != nil { + logw.Close() + return nil, err + } + } + + fs := &fileStorage{ + path: path, + readOnly: readOnly, + flock: flock, + logw: logw, + logSize: logSize, + } + runtime.SetFinalizer(fs, (*fileStorage).Close) + return fs, nil +} + +func (fs *fileStorage) Lock() (Locker, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return nil, ErrClosed + } + if fs.readOnly { + return &fileStorageLock{}, nil + } + if fs.slock != nil { + return nil, ErrLocked + } + fs.slock = &fileStorageLock{fs: fs} + return fs.slock, nil +} + +func itoa(buf []byte, i int, wid int) []byte { + u := uint(i) + if u == 0 && wid <= 1 { + return append(buf, '0') + } + + // Assemble decimal in reverse order. + var b [32]byte + bp := len(b) + for ; u > 0 || wid > 0; u /= 10 { + bp-- + wid-- + b[bp] = byte(u%10) + '0' + } + return append(buf, b[bp:]...) 
+} + +func (fs *fileStorage) printDay(t time.Time) { + if fs.day == t.Day() { + return + } + fs.day = t.Day() + fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n")) +} + +func (fs *fileStorage) doLog(t time.Time, str string) { + if fs.logSize > logSizeThreshold { + // Rotate log file. + fs.logw.Close() + fs.logw = nil + fs.logSize = 0 + rename(filepath.Join(fs.path, "LOG"), filepath.Join(fs.path, "LOG.old")) + } + if fs.logw == nil { + var err error + fs.logw, err = os.OpenFile(filepath.Join(fs.path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + return + } + // Force printDay on new log file. + fs.day = 0 + } + fs.printDay(t) + hour, min, sec := t.Clock() + msec := t.Nanosecond() / 1e3 + // time + fs.buf = itoa(fs.buf[:0], hour, 2) + fs.buf = append(fs.buf, ':') + fs.buf = itoa(fs.buf, min, 2) + fs.buf = append(fs.buf, ':') + fs.buf = itoa(fs.buf, sec, 2) + fs.buf = append(fs.buf, '.') + fs.buf = itoa(fs.buf, msec, 6) + fs.buf = append(fs.buf, ' ') + // write + fs.buf = append(fs.buf, []byte(str)...) + fs.buf = append(fs.buf, '\n') + n, _ := fs.logw.Write(fs.buf) + fs.logSize += int64(n) +} + +func (fs *fileStorage) Log(str string) { + if !fs.readOnly { + t := time.Now() + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return + } + fs.doLog(t, str) + } +} + +func (fs *fileStorage) log(str string) { + if !fs.readOnly { + fs.doLog(time.Now(), str) + } +} + +func (fs *fileStorage) setMeta(fd FileDesc) error { + content := fsGenName(fd) + "\n" + // Check and backup old CURRENT file. + currentPath := filepath.Join(fs.path, "CURRENT") + if _, err := os.Stat(currentPath); err == nil { + b, err := ioutil.ReadFile(currentPath) + if err != nil { + fs.log(fmt.Sprintf("backup CURRENT: %v", err)) + return err + } + if string(b) == content { + // Content not changed, do nothing. + return nil + } + if err := writeFileSynced(currentPath+".bak", b, 0644); err != nil { + fs.log(fmt.Sprintf("backup CURRENT: %v", err)) + return err + } + } else if !os.IsNotExist(err) { + return err + } + path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num) + if err := writeFileSynced(path, []byte(content), 0644); err != nil { + fs.log(fmt.Sprintf("create CURRENT.%d: %v", fd.Num, err)) + return err + } + // Replace CURRENT file. + if err := rename(path, currentPath); err != nil { + fs.log(fmt.Sprintf("rename CURRENT.%d: %v", fd.Num, err)) + return err + } + // Sync root directory. + if err := syncDir(fs.path); err != nil { + fs.log(fmt.Sprintf("syncDir: %v", err)) + return err + } + return nil +} + +func (fs *fileStorage) SetMeta(fd FileDesc) error { + if !FileDescOk(fd) { + return ErrInvalidFile + } + if fs.readOnly { + return errReadOnly + } + + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return ErrClosed + } + return fs.setMeta(fd) +} + +func (fs *fileStorage) GetMeta() (FileDesc, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return FileDesc{}, ErrClosed + } + dir, err := os.Open(fs.path) + if err != nil { + return FileDesc{}, err + } + names, err := dir.Readdirnames(0) + // Close the dir first before checking for Readdirnames error. + if ce := dir.Close(); ce != nil { + fs.log(fmt.Sprintf("close dir: %v", ce)) + } + if err != nil { + return FileDesc{}, err + } + // Try this in order: + // - CURRENT.[0-9]+ ('pending rename' file, descending order) + // - CURRENT + // - CURRENT.bak + // + // Skip corrupted file or file that point to a missing target file. 
+ type currentFile struct { + name string + fd FileDesc + } + tryCurrent := func(name string) (*currentFile, error) { + b, err := ioutil.ReadFile(filepath.Join(fs.path, name)) + if err != nil { + if os.IsNotExist(err) { + err = os.ErrNotExist + } + return nil, err + } + var fd FileDesc + if len(b) < 1 || b[len(b)-1] != '\n' || !fsParseNamePtr(string(b[:len(b)-1]), &fd) { + fs.log(fmt.Sprintf("%s: corrupted content: %q", name, b)) + err := &ErrCorrupted{ + Err: errors.New("leveldb/storage: corrupted or incomplete CURRENT file"), + } + return nil, err + } + if _, err := os.Stat(filepath.Join(fs.path, fsGenName(fd))); err != nil { + if os.IsNotExist(err) { + fs.log(fmt.Sprintf("%s: missing target file: %s", name, fd)) + err = os.ErrNotExist + } + return nil, err + } + return ¤tFile{name: name, fd: fd}, nil + } + tryCurrents := func(names []string) (*currentFile, error) { + var ( + cur *currentFile + // Last corruption error. + lastCerr error + ) + for _, name := range names { + var err error + cur, err = tryCurrent(name) + if err == nil { + break + } else if err == os.ErrNotExist { + // Fallback to the next file. + } else if isCorrupted(err) { + lastCerr = err + // Fallback to the next file. + } else { + // In case the error is due to permission, etc. + return nil, err + } + } + if cur == nil { + err := os.ErrNotExist + if lastCerr != nil { + err = lastCerr + } + return nil, err + } + return cur, nil + } + + // Try 'pending rename' files. + var nums []int64 + for _, name := range names { + if strings.HasPrefix(name, "CURRENT.") && name != "CURRENT.bak" { + i, err := strconv.ParseInt(name[8:], 10, 64) + if err == nil { + nums = append(nums, i) + } + } + } + var ( + pendCur *currentFile + pendErr = os.ErrNotExist + pendNames []string + ) + if len(nums) > 0 { + sort.Sort(sort.Reverse(int64Slice(nums))) + pendNames = make([]string, len(nums)) + for i, num := range nums { + pendNames[i] = fmt.Sprintf("CURRENT.%d", num) + } + pendCur, pendErr = tryCurrents(pendNames) + if pendErr != nil && pendErr != os.ErrNotExist && !isCorrupted(pendErr) { + return FileDesc{}, pendErr + } + } + + // Try CURRENT and CURRENT.bak. + curCur, curErr := tryCurrents([]string{"CURRENT", "CURRENT.bak"}) + if curErr != nil && curErr != os.ErrNotExist && !isCorrupted(curErr) { + return FileDesc{}, curErr + } + + // pendCur takes precedence, but guards against obsolete pendCur. + if pendCur != nil && (curCur == nil || pendCur.fd.Num > curCur.fd.Num) { + curCur = pendCur + } + + if curCur != nil { + // Restore CURRENT file to proper state. + if !fs.readOnly && (curCur.name != "CURRENT" || len(pendNames) != 0) { + // Ignore setMeta errors, however don't delete obsolete files if we + // catch error. + if err := fs.setMeta(curCur.fd); err == nil { + // Remove 'pending rename' files. + for _, name := range pendNames { + if err := os.Remove(filepath.Join(fs.path, name)); err != nil { + fs.log(fmt.Sprintf("remove %s: %v", name, err)) + } + } + } + } + return curCur.fd, nil + } + + // Nothing found. + if isCorrupted(pendErr) { + return FileDesc{}, pendErr + } + return FileDesc{}, curErr +} + +func (fs *fileStorage) List(ft FileType) (fds []FileDesc, err error) { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return nil, ErrClosed + } + dir, err := os.Open(fs.path) + if err != nil { + return + } + names, err := dir.Readdirnames(0) + // Close the dir first before checking for Readdirnames error. 
+ if cerr := dir.Close(); cerr != nil { + fs.log(fmt.Sprintf("close dir: %v", cerr)) + } + if err == nil { + for _, name := range names { + if fd, ok := fsParseName(name); ok && fd.Type&ft != 0 { + fds = append(fds, fd) + } + } + } + return +} + +func (fs *fileStorage) Open(fd FileDesc) (Reader, error) { + if !FileDescOk(fd) { + return nil, ErrInvalidFile + } + + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return nil, ErrClosed + } + of, err := os.OpenFile(filepath.Join(fs.path, fsGenName(fd)), os.O_RDONLY, 0) + if err != nil { + if fsHasOldName(fd) && os.IsNotExist(err) { + of, err = os.OpenFile(filepath.Join(fs.path, fsGenOldName(fd)), os.O_RDONLY, 0) + if err == nil { + goto ok + } + } + return nil, err + } +ok: + fs.open++ + return &fileWrap{File: of, fs: fs, fd: fd}, nil +} + +func (fs *fileStorage) Create(fd FileDesc) (Writer, error) { + if !FileDescOk(fd) { + return nil, ErrInvalidFile + } + if fs.readOnly { + return nil, errReadOnly + } + + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return nil, ErrClosed + } + of, err := os.OpenFile(filepath.Join(fs.path, fsGenName(fd)), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return nil, err + } + fs.open++ + return &fileWrap{File: of, fs: fs, fd: fd}, nil +} + +func (fs *fileStorage) Remove(fd FileDesc) error { + if !FileDescOk(fd) { + return ErrInvalidFile + } + if fs.readOnly { + return errReadOnly + } + + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return ErrClosed + } + err := os.Remove(filepath.Join(fs.path, fsGenName(fd))) + if err != nil { + if fsHasOldName(fd) && os.IsNotExist(err) { + if e1 := os.Remove(filepath.Join(fs.path, fsGenOldName(fd))); !os.IsNotExist(e1) { + fs.log(fmt.Sprintf("remove %s: %v (old name)", fd, err)) + err = e1 + } + } else { + fs.log(fmt.Sprintf("remove %s: %v", fd, err)) + } + } + return err +} + +func (fs *fileStorage) Rename(oldfd, newfd FileDesc) error { + if !FileDescOk(oldfd) || !FileDescOk(newfd) { + return ErrInvalidFile + } + if oldfd == newfd { + return nil + } + if fs.readOnly { + return errReadOnly + } + + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return ErrClosed + } + return rename(filepath.Join(fs.path, fsGenName(oldfd)), filepath.Join(fs.path, fsGenName(newfd))) +} + +func (fs *fileStorage) Close() error { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return ErrClosed + } + // Clear the finalizer. + runtime.SetFinalizer(fs, nil) + + if fs.open > 0 { + fs.log(fmt.Sprintf("close: warning, %d files still open", fs.open)) + } + fs.open = -1 + if fs.logw != nil { + fs.logw.Close() + } + return fs.flock.release() +} + +type fileWrap struct { + *os.File + fs *fileStorage + fd FileDesc + closed bool +} + +func (fw *fileWrap) Sync() error { + if err := fw.File.Sync(); err != nil { + return err + } + if fw.fd.Type == TypeManifest { + // Also sync parent directory if file type is manifest. + // See: https://code.google.com/p/leveldb/issues/detail?id=190. 
+ if err := syncDir(fw.fs.path); err != nil { + fw.fs.log(fmt.Sprintf("syncDir: %v", err)) + return err + } + } + return nil +} + +func (fw *fileWrap) Close() error { + fw.fs.mu.Lock() + defer fw.fs.mu.Unlock() + if fw.closed { + return ErrClosed + } + fw.closed = true + fw.fs.open-- + err := fw.File.Close() + if err != nil { + fw.fs.log(fmt.Sprintf("close %s: %v", fw.fd, err)) + } + return err +} + +func fsGenName(fd FileDesc) string { + switch fd.Type { + case TypeManifest: + return fmt.Sprintf("MANIFEST-%06d", fd.Num) + case TypeJournal: + return fmt.Sprintf("%06d.log", fd.Num) + case TypeTable: + return fmt.Sprintf("%06d.ldb", fd.Num) + case TypeTemp: + return fmt.Sprintf("%06d.tmp", fd.Num) + default: + panic("invalid file type") + } +} + +func fsHasOldName(fd FileDesc) bool { + return fd.Type == TypeTable +} + +func fsGenOldName(fd FileDesc) string { + switch fd.Type { + case TypeTable: + return fmt.Sprintf("%06d.sst", fd.Num) + } + return fsGenName(fd) +} + +func fsParseName(name string) (fd FileDesc, ok bool) { + var tail string + _, err := fmt.Sscanf(name, "%d.%s", &fd.Num, &tail) + if err == nil { + switch tail { + case "log": + fd.Type = TypeJournal + case "ldb", "sst": + fd.Type = TypeTable + case "tmp": + fd.Type = TypeTemp + default: + return + } + return fd, true + } + n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &fd.Num, &tail) + if n == 1 { + fd.Type = TypeManifest + return fd, true + } + return +} + +func fsParseNamePtr(name string, fd *FileDesc) bool { + _fd, ok := fsParseName(name) + if fd != nil { + *fd = _fd + } + return ok +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go new file mode 100644 index 0000000..5545aee --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go @@ -0,0 +1,34 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// +build nacl + +package storage + +import ( + "os" + "syscall" +) + +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { + return nil, syscall.ENOTSUP +} + +func setFileLock(f *os.File, readOnly, lock bool) error { + return syscall.ENOTSUP +} + +func rename(oldpath, newpath string) error { + return syscall.ENOTSUP +} + +func isErrInvalid(err error) bool { + return false +} + +func syncDir(name string) error { + return syscall.ENOTSUP +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go new file mode 100644 index 0000000..b829798 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go @@ -0,0 +1,63 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
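fsGenName and fsParseName above fix the on-disk naming scheme: MANIFEST-%06d for manifests, %06d.log for journals, %06d.ldb (or the legacy %06d.sst) for tables, and %06d.tmp for temp files. A standalone sketch of the parsing side, using fmt.Sscanf the same way; parseName is an illustrative stand-in, not the library function.

package main

import "fmt"

// parseName mirrors the suffix handling above: journals end in .log,
// tables in .ldb (or the legacy .sst), temp files in .tmp.
func parseName(name string) (num int64, kind string, ok bool) {
	var tail string
	if _, err := fmt.Sscanf(name, "%d.%s", &num, &tail); err == nil {
		switch tail {
		case "log":
			return num, "journal", true
		case "ldb", "sst":
			return num, "table", true
		case "tmp":
			return num, "temp", true
		}
	}
	return 0, "", false
}

func main() {
	fmt.Println(parseName("000005.ldb")) // 5 table true
	fmt.Println(parseName("000007.log")) // 7 journal true
	fmt.Println(parseName("LOCK"))       // 0  false
}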
+ +package storage + +import ( + "os" +) + +type plan9FileLock struct { + f *os.File +} + +func (fl *plan9FileLock) release() error { + return fl.f.Close() +} + +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { + var ( + flag int + perm os.FileMode + ) + if readOnly { + flag = os.O_RDONLY + } else { + flag = os.O_RDWR + perm = os.ModeExclusive + } + f, err := os.OpenFile(path, flag, perm) + if os.IsNotExist(err) { + f, err = os.OpenFile(path, flag|os.O_CREATE, perm|0644) + } + if err != nil { + return + } + fl = &plan9FileLock{f: f} + return +} + +func rename(oldpath, newpath string) error { + if _, err := os.Stat(newpath); err == nil { + if err := os.Remove(newpath); err != nil { + return err + } + } + + return os.Rename(oldpath, newpath) +} + +func syncDir(name string) error { + f, err := os.Open(name) + if err != nil { + return err + } + defer f.Close() + if err := f.Sync(); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go new file mode 100644 index 0000000..79901ee --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go @@ -0,0 +1,81 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// +build solaris + +package storage + +import ( + "os" + "syscall" +) + +type unixFileLock struct { + f *os.File +} + +func (fl *unixFileLock) release() error { + if err := setFileLock(fl.f, false, false); err != nil { + return err + } + return fl.f.Close() +} + +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { + var flag int + if readOnly { + flag = os.O_RDONLY + } else { + flag = os.O_RDWR + } + f, err := os.OpenFile(path, flag, 0) + if os.IsNotExist(err) { + f, err = os.OpenFile(path, flag|os.O_CREATE, 0644) + } + if err != nil { + return + } + err = setFileLock(f, readOnly, true) + if err != nil { + f.Close() + return + } + fl = &unixFileLock{f: f} + return +} + +func setFileLock(f *os.File, readOnly, lock bool) error { + flock := syscall.Flock_t{ + Type: syscall.F_UNLCK, + Start: 0, + Len: 0, + Whence: 1, + } + if lock { + if readOnly { + flock.Type = syscall.F_RDLCK + } else { + flock.Type = syscall.F_WRLCK + } + } + return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flock) +} + +func rename(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} + +func syncDir(name string) error { + f, err := os.Open(name) + if err != nil { + return err + } + defer f.Close() + if err := f.Sync(); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go new file mode 100644 index 0000000..d75f66a --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go @@ -0,0 +1,98 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
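The solaris variant above takes its lock with fcntl(F_SETLK); the unix variant that follows uses flock(2). Both acquire a shared lock for read-only opens and an exclusive one otherwise, and both are non-blocking, so a second process fails fast instead of hanging. A minimal flock sketch under those assumptions; lockFile is a hypothetical helper, and the build tag restricts it to unix targets.

// +build darwin dragonfly freebsd linux netbsd openbsd

package main

import (
	"fmt"
	"os"
	"syscall"
)

// lockFile takes a non-blocking exclusive flock on path, creating the
// file if needed, and returns the open handle that owns the lock.
func lockFile(path string) (*os.File, error) {
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		return nil, err
	}
	// LOCK_NB makes the call fail with EWOULDBLOCK instead of blocking
	// when another process already holds the lock.
	if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
		f.Close()
		return nil, err
	}
	return f, nil
}

func main() {
	f, err := lockFile("/tmp/demo.LOCK")
	if err != nil {
		fmt.Println("already locked:", err)
		return
	}
	defer f.Close() // closing the descriptor releases the lock
	fmt.Println("lock acquired")
}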
+ +// +build darwin dragonfly freebsd linux netbsd openbsd + +package storage + +import ( + "os" + "syscall" +) + +type unixFileLock struct { + f *os.File +} + +func (fl *unixFileLock) release() error { + if err := setFileLock(fl.f, false, false); err != nil { + return err + } + return fl.f.Close() +} + +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { + var flag int + if readOnly { + flag = os.O_RDONLY + } else { + flag = os.O_RDWR + } + f, err := os.OpenFile(path, flag, 0) + if os.IsNotExist(err) { + f, err = os.OpenFile(path, flag|os.O_CREATE, 0644) + } + if err != nil { + return + } + err = setFileLock(f, readOnly, true) + if err != nil { + f.Close() + return + } + fl = &unixFileLock{f: f} + return +} + +func setFileLock(f *os.File, readOnly, lock bool) error { + how := syscall.LOCK_UN + if lock { + if readOnly { + how = syscall.LOCK_SH + } else { + how = syscall.LOCK_EX + } + } + return syscall.Flock(int(f.Fd()), how|syscall.LOCK_NB) +} + +func rename(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} + +func isErrInvalid(err error) bool { + if err == os.ErrInvalid { + return true + } + // Go < 1.8 + if syserr, ok := err.(*os.SyscallError); ok && syserr.Err == syscall.EINVAL { + return true + } + // Go >= 1.8 returns *os.PathError instead + if patherr, ok := err.(*os.PathError); ok && patherr.Err == syscall.EINVAL { + return true + } + return false +} + +func syncDir(name string) error { + // As per fsync manpage, Linux seems to expect fsync on directory, however + // some system don't support this, so we will ignore syscall.EINVAL. + // + // From fsync(2): + // Calling fsync() does not necessarily ensure that the entry in the + // directory containing the file has also reached disk. For that an + // explicit fsync() on a file descriptor for the directory is also needed. + f, err := os.Open(name) + if err != nil { + return err + } + defer f.Close() + if err := f.Sync(); err != nil && !isErrInvalid(err) { + return err + } + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go new file mode 100644 index 0000000..899335f --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go @@ -0,0 +1,78 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
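The fsync(2) caveat described in the unix syncDir above is worth pinning down: creating or renaming a file only becomes durable once the containing directory is itself fsynced, yet some filesystems reject fsync on a directory fd with EINVAL. A condensed sketch of the same pattern; on modern Go, errors.Is does the unwrapping that isErrInvalid performs by hand above (assumes Go 1.16+ and a unix target).

// +build darwin dragonfly freebsd linux netbsd openbsd

package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"syscall"
)

// syncDir fsyncs a directory so a just-created or just-renamed entry
// survives a crash; EINVAL is tolerated because some filesystems do not
// support fsync on directories.
func syncDir(name string) error {
	f, err := os.Open(name)
	if err != nil {
		return err
	}
	defer f.Close()
	if err := f.Sync(); err != nil && !errors.Is(err, syscall.EINVAL) {
		return err
	}
	return nil
}

func main() {
	dir, err := os.MkdirTemp("", "demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	if err := os.WriteFile(filepath.Join(dir, "CURRENT"), []byte("MANIFEST-000001\n"), 0644); err != nil {
		panic(err)
	}
	fmt.Println(syncDir(dir)) // <nil> on success
}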
+ +package storage + +import ( + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procMoveFileExW = modkernel32.NewProc("MoveFileExW") +) + +const ( + _MOVEFILE_REPLACE_EXISTING = 1 +) + +type windowsFileLock struct { + fd syscall.Handle +} + +func (fl *windowsFileLock) release() error { + return syscall.Close(fl.fd) +} + +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return + } + var access, shareMode uint32 + if readOnly { + access = syscall.GENERIC_READ + shareMode = syscall.FILE_SHARE_READ + } else { + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + } + fd, err := syscall.CreateFile(pathp, access, shareMode, nil, syscall.OPEN_EXISTING, syscall.FILE_ATTRIBUTE_NORMAL, 0) + if err == syscall.ERROR_FILE_NOT_FOUND { + fd, err = syscall.CreateFile(pathp, access, shareMode, nil, syscall.OPEN_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0) + } + if err != nil { + return + } + fl = &windowsFileLock{fd: fd} + return +} + +func moveFileEx(from *uint16, to *uint16, flags uint32) error { + r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + if r1 == 0 { + if e1 != 0 { + return error(e1) + } + return syscall.EINVAL + } + return nil +} + +func rename(oldpath, newpath string) error { + from, err := syscall.UTF16PtrFromString(oldpath) + if err != nil { + return err + } + to, err := syscall.UTF16PtrFromString(newpath) + if err != nil { + return err + } + return moveFileEx(from, to, _MOVEFILE_REPLACE_EXISTING) +} + +func syncDir(name string) error { return nil } diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go new file mode 100644 index 0000000..838f1be --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go @@ -0,0 +1,222 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package storage + +import ( + "bytes" + "os" + "sync" +) + +const typeShift = 4 + +// Verify at compile-time that typeShift is large enough to cover all FileType +// values by confirming that 0 == 0. +var _ [0]struct{} = [TypeAll >> typeShift]struct{}{} + +type memStorageLock struct { + ms *memStorage +} + +func (lock *memStorageLock) Unlock() { + ms := lock.ms + ms.mu.Lock() + defer ms.mu.Unlock() + if ms.slock == lock { + ms.slock = nil + } + return +} + +// memStorage is a memory-backed storage. +type memStorage struct { + mu sync.Mutex + slock *memStorageLock + files map[uint64]*memFile + meta FileDesc +} + +// NewMemStorage returns a new memory-backed storage implementation. 
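+//
+// It is primarily useful for tests. Illustrative use, assuming this
+// package is imported as storage:
+//
+//	ms := storage.NewMemStorage()
+//	w, _ := ms.Create(storage.FileDesc{Type: storage.TypeJournal, Num: 1})
+//	w.Write([]byte("entry"))
+//	w.Close()
+//	r, _ := ms.Open(storage.FileDesc{Type: storage.TypeJournal, Num: 1})
+//	defer r.Close()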
+func NewMemStorage() Storage {
+	return &memStorage{
+		files: make(map[uint64]*memFile),
+	}
+}
+
+func (ms *memStorage) Lock() (Locker, error) {
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	if ms.slock != nil {
+		return nil, ErrLocked
+	}
+	ms.slock = &memStorageLock{ms: ms}
+	return ms.slock, nil
+}
+
+func (*memStorage) Log(str string) {}
+
+func (ms *memStorage) SetMeta(fd FileDesc) error {
+	if !FileDescOk(fd) {
+		return ErrInvalidFile
+	}
+
+	ms.mu.Lock()
+	ms.meta = fd
+	ms.mu.Unlock()
+	return nil
+}
+
+func (ms *memStorage) GetMeta() (FileDesc, error) {
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	if ms.meta.Zero() {
+		return FileDesc{}, os.ErrNotExist
+	}
+	return ms.meta, nil
+}
+
+func (ms *memStorage) List(ft FileType) ([]FileDesc, error) {
+	ms.mu.Lock()
+	var fds []FileDesc
+	for x := range ms.files {
+		fd := unpackFile(x)
+		if fd.Type&ft != 0 {
+			fds = append(fds, fd)
+		}
+	}
+	ms.mu.Unlock()
+	return fds, nil
+}
+
+func (ms *memStorage) Open(fd FileDesc) (Reader, error) {
+	if !FileDescOk(fd) {
+		return nil, ErrInvalidFile
+	}
+
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	if m, exist := ms.files[packFile(fd)]; exist {
+		if m.open {
+			return nil, errFileOpen
+		}
+		m.open = true
+		return &memReader{Reader: bytes.NewReader(m.Bytes()), ms: ms, m: m}, nil
+	}
+	return nil, os.ErrNotExist
+}
+
+func (ms *memStorage) Create(fd FileDesc) (Writer, error) {
+	if !FileDescOk(fd) {
+		return nil, ErrInvalidFile
+	}
+
+	x := packFile(fd)
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	m, exist := ms.files[x]
+	if exist {
+		if m.open {
+			return nil, errFileOpen
+		}
+		m.Reset()
+	} else {
+		m = &memFile{}
+		ms.files[x] = m
+	}
+	m.open = true
+	return &memWriter{memFile: m, ms: ms}, nil
+}
+
+func (ms *memStorage) Remove(fd FileDesc) error {
+	if !FileDescOk(fd) {
+		return ErrInvalidFile
+	}
+
+	x := packFile(fd)
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	if _, exist := ms.files[x]; exist {
+		delete(ms.files, x)
+		return nil
+	}
+	return os.ErrNotExist
+}
+
+func (ms *memStorage) Rename(oldfd, newfd FileDesc) error {
+	if !FileDescOk(oldfd) || !FileDescOk(newfd) {
+		return ErrInvalidFile
+	}
+	if oldfd == newfd {
+		return nil
+	}
+
+	oldx := packFile(oldfd)
+	newx := packFile(newfd)
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	oldm, exist := ms.files[oldx]
+	if !exist {
+		return os.ErrNotExist
+	}
+	newm, exist := ms.files[newx]
+	if (exist && newm.open) || oldm.open {
+		return errFileOpen
+	}
+	delete(ms.files, oldx)
+	ms.files[newx] = oldm
+	return nil
+}
+
+func (*memStorage) Close() error { return nil }
+
+type memFile struct {
+	bytes.Buffer
+	open bool
+}
+
+type memReader struct {
+	*bytes.Reader
+	ms     *memStorage
+	m      *memFile
+	closed bool
+}
+
+func (mr *memReader) Close() error {
+	mr.ms.mu.Lock()
+	defer mr.ms.mu.Unlock()
+	if mr.closed {
+		return ErrClosed
+	}
+	mr.m.open = false
+	return nil
+}
+
+type memWriter struct {
+	*memFile
+	ms     *memStorage
+	closed bool
+}
+
+func (*memWriter) Sync() error { return nil }
+
+func (mw *memWriter) Close() error {
+	mw.ms.mu.Lock()
+	defer mw.ms.mu.Unlock()
+	if mw.closed {
+		return ErrClosed
+	}
+	mw.memFile.open = false
+	return nil
+}
+
+func packFile(fd FileDesc) uint64 {
+	return uint64(fd.Num)<<typeShift | uint64(fd.Type)
+}
+
+func unpackFile(x uint64) FileDesc {
+	return FileDesc{FileType(x) & TypeAll, int64(x >> typeShift)}
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go
new file mode 100644
index 0000000..4e4a724
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go
@@ -0,0 +1,187 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package storage provides storage abstraction for LevelDB.
+package storage
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+// FileType represents a file type.
+type FileType int
+
+// File types.
+const (
+	TypeManifest FileType = 1 << iota
+	TypeJournal
+	TypeTable
+	TypeTemp
+
+	TypeAll = TypeManifest | TypeJournal | TypeTable | TypeTemp
+)
+
+func (t FileType) String() string {
+	switch t {
+	case TypeManifest:
+		return "manifest"
+	case TypeJournal:
+		return "journal"
+	case TypeTable:
+		return "table"
+	case TypeTemp:
+		return "temp"
+	}
+	return fmt.Sprintf("<unknown:%d>", t)
+}
+
+// Common errors.
+var (
+	ErrInvalidFile = errors.New("leveldb/storage: invalid file for argument")
+	ErrLocked      = errors.New("leveldb/storage: already locked")
+	ErrClosed      = errors.New("leveldb/storage: closed")
+)
+
+// ErrCorrupted is the type that wraps errors that indicate corruption of
+// a file. Package storage has its own type instead of using
+// errors.ErrCorrupted to prevent circular import.
+type ErrCorrupted struct {
+	Fd  FileDesc
+	Err error
+}
+
+func isCorrupted(err error) bool {
+	switch err.(type) {
+	case *ErrCorrupted:
+		return true
+	}
+	return false
+}
+
+func (e *ErrCorrupted) Error() string {
+	if !e.Fd.Zero() {
+		return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd)
+	}
+	return e.Err.Error()
+}
+
+// Syncer is the interface that wraps the basic Sync method.
+type Syncer interface {
+	// Sync commits the current contents of the file to stable storage.
+	Sync() error
+}
+
+// Reader is the interface that groups the basic Read, Seek, ReadAt and Close
+// methods.
+type Reader interface {
+	io.ReadSeeker
+	io.ReaderAt
+	io.Closer
+}
+
+// Writer is the interface that groups the basic Write, Sync and Close
+// methods.
+type Writer interface {
+	io.WriteCloser
+	Syncer
+}
+
+// Locker is the interface that wraps the Unlock method.
+type Locker interface {
+	Unlock()
+}
+
+// FileDesc is a 'file descriptor'.
+type FileDesc struct {
+	Type FileType
+	Num  int64
+}
+
+func (fd FileDesc) String() string {
+	switch fd.Type {
+	case TypeManifest:
+		return fmt.Sprintf("MANIFEST-%06d", fd.Num)
+	case TypeJournal:
+		return fmt.Sprintf("%06d.log", fd.Num)
+	case TypeTable:
+		return fmt.Sprintf("%06d.ldb", fd.Num)
+	case TypeTemp:
+		return fmt.Sprintf("%06d.tmp", fd.Num)
+	default:
+		return fmt.Sprintf("%#x-%d", fd.Type, fd.Num)
+	}
+}
+
+// Zero returns true if fd == (FileDesc{}).
+func (fd FileDesc) Zero() bool {
+	return fd == (FileDesc{})
+}
+
+// FileDescOk returns true if fd is a valid 'file descriptor'.
+func FileDescOk(fd FileDesc) bool {
+	switch fd.Type {
+	case TypeManifest:
+	case TypeJournal:
+	case TypeTable:
+	case TypeTemp:
+	default:
+		return false
+	}
+	return fd.Num >= 0
+}
+
+// Storage is the storage. A storage instance must be safe for concurrent use.
+type Storage interface {
+	// Lock locks the storage. Any subsequent attempt to call Lock will fail
+	// until the last lock is released.
+	// The caller should call the Unlock method after use.
+	Lock() (Locker, error)
+
+	// Log logs a string. This is used for logging.
+	// An implementation may write to a file, stdout or simply do nothing.
+	Log(str string)
+
+	// SetMeta stores the 'file descriptor' so that it can later be acquired
+	// using the GetMeta method. The 'file descriptor' should point to a
+	// valid file.
+	// SetMeta should be implemented in such a way that the change happens
+	// atomically.
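+	// The file-system implementation achieves this by writing the new
+	// content to a side file and renaming it over CURRENT.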
+ SetMeta(fd FileDesc) error + + // GetMeta returns 'file descriptor' stored in meta. The 'file descriptor' + // can be updated using SetMeta method. + // Returns os.ErrNotExist if meta doesn't store any 'file descriptor', or + // 'file descriptor' point to nonexistent file. + GetMeta() (FileDesc, error) + + // List returns file descriptors that match the given file types. + // The file types may be OR'ed together. + List(ft FileType) ([]FileDesc, error) + + // Open opens file with the given 'file descriptor' read-only. + // Returns os.ErrNotExist error if the file does not exist. + // Returns ErrClosed if the underlying storage is closed. + Open(fd FileDesc) (Reader, error) + + // Create creates file with the given 'file descriptor', truncate if already + // exist and opens write-only. + // Returns ErrClosed if the underlying storage is closed. + Create(fd FileDesc) (Writer, error) + + // Remove removes file with the given 'file descriptor'. + // Returns ErrClosed if the underlying storage is closed. + Remove(fd FileDesc) error + + // Rename renames file from oldfd to newfd. + // Returns ErrClosed if the underlying storage is closed. + Rename(oldfd, newfd FileDesc) error + + // Close closes the storage. + // It is valid to call Close multiple times. Other methods should not be + // called after the storage has been closed. + Close() error +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table.go b/vendor/github.com/syndtr/goleveldb/leveldb/table.go new file mode 100644 index 0000000..1fac60d --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/table.go @@ -0,0 +1,531 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "fmt" + "sort" + "sync/atomic" + + "github.com/syndtr/goleveldb/leveldb/cache" + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" + "github.com/syndtr/goleveldb/leveldb/table" + "github.com/syndtr/goleveldb/leveldb/util" +) + +// tFile holds basic information about a table. +type tFile struct { + fd storage.FileDesc + seekLeft int32 + size int64 + imin, imax internalKey +} + +// Returns true if given key is after largest key of this table. +func (t *tFile) after(icmp *iComparer, ukey []byte) bool { + return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0 +} + +// Returns true if given key is before smallest key of this table. +func (t *tFile) before(icmp *iComparer, ukey []byte) bool { + return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0 +} + +// Returns true if given key range overlaps with this table key range. +func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool { + return !t.after(icmp, umin) && !t.before(icmp, umax) +} + +// Cosumes one seek and return current seeks left. +func (t *tFile) consumeSeek() int32 { + return atomic.AddInt32(&t.seekLeft, -1) +} + +// Creates new tFile. +func newTableFile(fd storage.FileDesc, size int64, imin, imax internalKey) *tFile { + f := &tFile{ + fd: fd, + size: size, + imin: imin, + imax: imax, + } + + // We arrange to automatically compact this file after + // a certain number of seeks. 
Let's assume: + // (1) One seek costs 10ms + // (2) Writing or reading 1MB costs 10ms (100MB/s) + // (3) A compaction of 1MB does 25MB of IO: + // 1MB read from this level + // 10-12MB read from next level (boundaries may be misaligned) + // 10-12MB written to next level + // This implies that 25 seeks cost the same as the compaction + // of 1MB of data. I.e., one seek costs approximately the + // same as the compaction of 40KB of data. We are a little + // conservative and allow approximately one seek for every 16KB + // of data before triggering a compaction. + f.seekLeft = int32(size / 16384) + if f.seekLeft < 100 { + f.seekLeft = 100 + } + + return f +} + +func tableFileFromRecord(r atRecord) *tFile { + return newTableFile(storage.FileDesc{Type: storage.TypeTable, Num: r.num}, r.size, r.imin, r.imax) +} + +// tFiles hold multiple tFile. +type tFiles []*tFile + +func (tf tFiles) Len() int { return len(tf) } +func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] } + +func (tf tFiles) nums() string { + x := "[ " + for i, f := range tf { + if i != 0 { + x += ", " + } + x += fmt.Sprint(f.fd.Num) + } + x += " ]" + return x +} + +// Returns true if i smallest key is less than j. +// This used for sort by key in ascending order. +func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool { + a, b := tf[i], tf[j] + n := icmp.Compare(a.imin, b.imin) + if n == 0 { + return a.fd.Num < b.fd.Num + } + return n < 0 +} + +// Returns true if i file number is greater than j. +// This used for sort by file number in descending order. +func (tf tFiles) lessByNum(i, j int) bool { + return tf[i].fd.Num > tf[j].fd.Num +} + +// Sorts tables by key in ascending order. +func (tf tFiles) sortByKey(icmp *iComparer) { + sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp}) +} + +// Sorts tables by file number in descending order. +func (tf tFiles) sortByNum() { + sort.Sort(&tFilesSortByNum{tFiles: tf}) +} + +// Returns sum of all tables size. +func (tf tFiles) size() (sum int64) { + for _, t := range tf { + sum += t.size + } + return sum +} + +// Searches smallest index of tables whose its smallest +// key is after or equal with given key. +func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int { + return sort.Search(len(tf), func(i int) bool { + return icmp.Compare(tf[i].imin, ikey) >= 0 + }) +} + +// Searches smallest index of tables whose its largest +// key is after or equal with given key. +func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int { + return sort.Search(len(tf), func(i int) bool { + return icmp.Compare(tf[i].imax, ikey) >= 0 + }) +} + +// Returns true if given key range overlaps with one or more +// tables key range. If unsorted is true then binary search will not be used. +func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool { + if unsorted { + // Check against all files. + for _, t := range tf { + if t.overlaps(icmp, umin, umax) { + return true + } + } + return false + } + + i := 0 + if len(umin) > 0 { + // Find the earliest possible internal key for min. + i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek)) + } + if i >= len(tf) { + // Beginning of range is after all files, so no overlap. + return false + } + return !tf[i].before(icmp, umax) +} + +// Returns tables whose its key range overlaps with given key range. +// Range will be expanded if ukey found hop across tables. +// If overlapped is true then the search will be restarted if umax +// expanded. +// The dst content will be overwritten. 
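To make the seek-cost arithmetic in newTableFile above concrete: a 2 MiB table earns 2097152/16384 = 128 seeks before it becomes a compaction candidate, while the 100-seek floor keeps tiny tables from being compacted too eagerly. A standalone sketch of the same heuristic follows; seekBudget is a hypothetical helper name for illustration, not part of the vendored code.

    package main

    import "fmt"

    // seekBudget mirrors the heuristic in newTableFile: one seek is
    // "worth" roughly 16KiB of compaction IO, with a floor of 100 seeks.
    func seekBudget(size int64) int32 {
    	budget := int32(size / 16384)
    	if budget < 100 {
    		budget = 100
    	}
    	return budget
    }

    func main() {
    	fmt.Println(seekBudget(2 << 20))  // 2MiB table: 128 seeks
    	fmt.Println(seekBudget(64 << 10)) // 64KiB table: floor of 100
    }
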
+func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles { + dst = dst[:0] + for i := 0; i < len(tf); { + t := tf[i] + if t.overlaps(icmp, umin, umax) { + if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 { + umin = t.imin.ukey() + dst = dst[:0] + i = 0 + continue + } else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 { + umax = t.imax.ukey() + // Restart search if it is overlapped. + if overlapped { + dst = dst[:0] + i = 0 + continue + } + } + + dst = append(dst, t) + } + i++ + } + + return dst +} + +// Returns tables key range. +func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) { + for i, t := range tf { + if i == 0 { + imin, imax = t.imin, t.imax + continue + } + if icmp.Compare(t.imin, imin) < 0 { + imin = t.imin + } + if icmp.Compare(t.imax, imax) > 0 { + imax = t.imax + } + } + + return +} + +// Creates iterator index from tables. +func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer { + if slice != nil { + var start, limit int + if slice.Start != nil { + start = tf.searchMax(icmp, internalKey(slice.Start)) + } + if slice.Limit != nil { + limit = tf.searchMin(icmp, internalKey(slice.Limit)) + } else { + limit = tf.Len() + } + tf = tf[start:limit] + } + return iterator.NewArrayIndexer(&tFilesArrayIndexer{ + tFiles: tf, + tops: tops, + icmp: icmp, + slice: slice, + ro: ro, + }) +} + +// Tables iterator index. +type tFilesArrayIndexer struct { + tFiles + tops *tOps + icmp *iComparer + slice *util.Range + ro *opt.ReadOptions +} + +func (a *tFilesArrayIndexer) Search(key []byte) int { + return a.searchMax(a.icmp, internalKey(key)) +} + +func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator { + if i == 0 || i == a.Len()-1 { + return a.tops.newIterator(a.tFiles[i], a.slice, a.ro) + } + return a.tops.newIterator(a.tFiles[i], nil, a.ro) +} + +// Helper type for sortByKey. +type tFilesSortByKey struct { + tFiles + icmp *iComparer +} + +func (x *tFilesSortByKey) Less(i, j int) bool { + return x.lessByKey(x.icmp, i, j) +} + +// Helper type for sortByNum. +type tFilesSortByNum struct { + tFiles +} + +func (x *tFilesSortByNum) Less(i, j int) bool { + return x.lessByNum(i, j) +} + +// Table operations. +type tOps struct { + s *session + noSync bool + evictRemoved bool + cache *cache.Cache + bcache *cache.Cache + bpool *util.BufferPool +} + +// Creates an empty table and returns table writer. +func (t *tOps) create() (*tWriter, error) { + fd := storage.FileDesc{Type: storage.TypeTable, Num: t.s.allocFileNum()} + fw, err := t.s.stor.Create(fd) + if err != nil { + return nil, err + } + return &tWriter{ + t: t, + fd: fd, + w: fw, + tw: table.NewWriter(fw, t.s.o.Options), + }, nil +} + +// Builds table from src iterator. +func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) { + w, err := t.create() + if err != nil { + return + } + + defer func() { + if err != nil { + w.drop() + } + }() + + for src.Next() { + err = w.append(src.Key(), src.Value()) + if err != nil { + return + } + } + err = src.Error() + if err != nil { + return + } + + n = w.tw.EntriesLen() + f, err = w.finish() + return +} + +// Opens table. It returns a cache handle, which should +// be released after use. 
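The restart-on-expansion behaviour of getOverlaps above is easiest to see with plain integer ranges. The sketch below models only the overlapped (level-0) case, where tables may overlap each other, so any widening of the searched range restarts the scan; rng and overlapsExpanding are hypothetical names introduced for illustration.

    package main

    import "fmt"

    // rng is a toy stand-in for a table's [imin, imax] user-key range.
    type rng struct{ min, max int }

    // overlapsExpanding mimics tFiles.getOverlaps with overlapped=true:
    // whenever a matching table widens the searched range, the scan
    // restarts from the beginning so that every transitively
    // overlapping table is collected.
    func overlapsExpanding(tables []rng, min, max int) []rng {
    	var dst []rng
    	for i := 0; i < len(tables); {
    		t := tables[i]
    		if t.max >= min && t.min <= max {
    			if t.min < min {
    				min, dst, i = t.min, nil, 0
    				continue
    			}
    			if t.max > max {
    				max, dst, i = t.max, nil, 0
    				continue
    			}
    			dst = append(dst, t)
    		}
    		i++
    	}
    	return dst
    }

    func main() {
    	tables := []rng{{0, 3}, {2, 6}, {5, 9}, {20, 30}}
    	// Querying [4,5] pulls in {2,6}, which widens the range enough
    	// to pull in {0,3} and {5,9}; the disjoint {20,30} stays out.
    	fmt.Println(overlapsExpanding(tables, 4, 5)) // [{0 3} {2 6} {5 9}]
    }
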
+func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) { + ch = t.cache.Get(0, uint64(f.fd.Num), func() (size int, value cache.Value) { + var r storage.Reader + r, err = t.s.stor.Open(f.fd) + if err != nil { + return 0, nil + } + + var bcache *cache.NamespaceGetter + if t.bcache != nil { + bcache = &cache.NamespaceGetter{Cache: t.bcache, NS: uint64(f.fd.Num)} + } + + var tr *table.Reader + tr, err = table.NewReader(r, f.size, f.fd, bcache, t.bpool, t.s.o.Options) + if err != nil { + r.Close() + return 0, nil + } + return 1, tr + + }) + if ch == nil && err == nil { + err = ErrClosed + } + return +} + +// Finds key/value pair whose key is greater than or equal to the +// given key. +func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) { + ch, err := t.open(f) + if err != nil { + return nil, nil, err + } + defer ch.Release() + return ch.Value().(*table.Reader).Find(key, true, ro) +} + +// Finds key that is greater than or equal to the given key. +func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) { + ch, err := t.open(f) + if err != nil { + return nil, err + } + defer ch.Release() + return ch.Value().(*table.Reader).FindKey(key, true, ro) +} + +// Returns approximate offset of the given key. +func (t *tOps) offsetOf(f *tFile, key []byte) (offset int64, err error) { + ch, err := t.open(f) + if err != nil { + return + } + defer ch.Release() + return ch.Value().(*table.Reader).OffsetOf(key) +} + +// Creates an iterator from the given table. +func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + ch, err := t.open(f) + if err != nil { + return iterator.NewEmptyIterator(err) + } + iter := ch.Value().(*table.Reader).NewIterator(slice, ro) + iter.SetReleaser(ch) + return iter +} + +// Removes table from persistent storage. It waits until +// no one use the the table. +func (t *tOps) remove(f *tFile) { + t.cache.Delete(0, uint64(f.fd.Num), func() { + if err := t.s.stor.Remove(f.fd); err != nil { + t.s.logf("table@remove removing @%d %q", f.fd.Num, err) + } else { + t.s.logf("table@remove removed @%d", f.fd.Num) + } + if t.evictRemoved && t.bcache != nil { + t.bcache.EvictNS(uint64(f.fd.Num)) + } + }) +} + +// Closes the table ops instance. It will close all tables, +// regadless still used or not. +func (t *tOps) close() { + t.bpool.Close() + t.cache.Close() + if t.bcache != nil { + t.bcache.CloseWeak() + } +} + +// Creates new initialized table ops instance. +func newTableOps(s *session) *tOps { + var ( + cacher cache.Cacher + bcache *cache.Cache + bpool *util.BufferPool + ) + if s.o.GetOpenFilesCacheCapacity() > 0 { + cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity()) + } + if !s.o.GetDisableBlockCache() { + var bcacher cache.Cacher + if s.o.GetBlockCacheCapacity() > 0 { + bcacher = s.o.GetBlockCacher().New(s.o.GetBlockCacheCapacity()) + } + bcache = cache.NewCache(bcacher) + } + if !s.o.GetDisableBufferPool() { + bpool = util.NewBufferPool(s.o.GetBlockSize() + 5) + } + return &tOps{ + s: s, + noSync: s.o.GetNoSync(), + evictRemoved: s.o.GetBlockCacheEvictRemoved(), + cache: cache.NewCache(cacher), + bcache: bcache, + bpool: bpool, + } +} + +// tWriter wraps the table writer. It keep track of file descriptor +// and added key range. +type tWriter struct { + t *tOps + + fd storage.FileDesc + w storage.Writer + tw *table.Writer + + first, last []byte +} + +// Append key/value pair to the table. 
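Seen through the exported API vendored in this same patch, the tWriter flow that follows reduces to table.NewWriter, Append in increasing key order, then Close. A minimal usage sketch, assuming the nil-safe option getters this package provides:

    package main

    import (
    	"bytes"
    	"fmt"

    	"github.com/syndtr/goleveldb/leveldb/table"
    )

    func main() {
    	var buf bytes.Buffer
    	// nil options: the opt getters used by the writer fall back to
    	// package defaults (bytewise comparer, 4KiB blocks, snappy).
    	w := table.NewWriter(&buf, nil)
    	for _, k := range []string{"apple", "banana", "cherry"} {
    		if err := w.Append([]byte(k), []byte("v-"+k)); err != nil {
    			panic(err)
    		}
    	}
    	if err := w.Close(); err != nil {
    		panic(err)
    	}
    	// EntriesLen and BytesLen remain callable after Close.
    	fmt.Printf("%d entries in %d bytes\n", w.EntriesLen(), w.BytesLen())
    }
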
+func (w *tWriter) append(key, value []byte) error { + if w.first == nil { + w.first = append([]byte{}, key...) + } + w.last = append(w.last[:0], key...) + return w.tw.Append(key, value) +} + +// Returns true if the table is empty. +func (w *tWriter) empty() bool { + return w.first == nil +} + +// Closes the storage.Writer. +func (w *tWriter) close() { + if w.w != nil { + w.w.Close() + w.w = nil + } +} + +// Finalizes the table and returns table file. +func (w *tWriter) finish() (f *tFile, err error) { + defer w.close() + err = w.tw.Close() + if err != nil { + return + } + if !w.t.noSync { + err = w.w.Sync() + if err != nil { + return + } + } + f = newTableFile(w.fd, int64(w.tw.BytesLen()), internalKey(w.first), internalKey(w.last)) + return +} + +// Drops the table. +func (w *tWriter) drop() { + w.close() + w.t.s.stor.Remove(w.fd) + w.t.s.reuseFileNum(w.fd.Num) + w.tw = nil + w.first = nil + w.last = nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go new file mode 100644 index 0000000..496feb6 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go @@ -0,0 +1,1139 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package table + +import ( + "encoding/binary" + "fmt" + "io" + "sort" + "strings" + "sync" + + "github.com/golang/snappy" + + "github.com/syndtr/goleveldb/leveldb/cache" + "github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/filter" + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" + "github.com/syndtr/goleveldb/leveldb/util" +) + +// Reader errors. +var ( + ErrNotFound = errors.ErrNotFound + ErrReaderReleased = errors.New("leveldb/table: reader released") + ErrIterReleased = errors.New("leveldb/table: iterator released") +) + +// ErrCorrupted describes error due to corruption. This error will be wrapped +// with errors.ErrCorrupted. +type ErrCorrupted struct { + Pos int64 + Size int64 + Kind string + Reason string +} + +func (e *ErrCorrupted) Error() string { + return fmt.Sprintf("leveldb/table: corruption on %s (pos=%d): %s", e.Kind, e.Pos, e.Reason) +} + +func max(x, y int) int { + if x > y { + return x + } + return y +} + +type block struct { + bpool *util.BufferPool + bh blockHandle + data []byte + restartsLen int + restartsOffset int +} + +func (b *block) seek(cmp comparer.Comparer, rstart, rlimit int, key []byte) (index, offset int, err error) { + index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { + offset := int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) + offset++ // shared always zero, since this is a restart point + v1, n1 := binary.Uvarint(b.data[offset:]) // key length + _, n2 := binary.Uvarint(b.data[offset+n1:]) // value length + m := offset + n1 + n2 + return cmp.Compare(b.data[m:m+int(v1)], key) > 0 + }) + rstart - 1 + if index < rstart { + // The smallest key is greater-than key sought. 
+ index = rstart + } + offset = int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) + return +} + +func (b *block) restartIndex(rstart, rlimit, offset int) int { + return sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { + return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) > offset + }) + rstart - 1 +} + +func (b *block) restartOffset(index int) int { + return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) +} + +func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error) { + if offset >= b.restartsOffset { + if offset != b.restartsOffset { + err = &ErrCorrupted{Reason: "entries offset not aligned"} + } + return + } + v0, n0 := binary.Uvarint(b.data[offset:]) // Shared prefix length + v1, n1 := binary.Uvarint(b.data[offset+n0:]) // Key length + v2, n2 := binary.Uvarint(b.data[offset+n0+n1:]) // Value length + m := n0 + n1 + n2 + n = m + int(v1) + int(v2) + if n0 <= 0 || n1 <= 0 || n2 <= 0 || offset+n > b.restartsOffset { + err = &ErrCorrupted{Reason: "entries corrupted"} + return + } + key = b.data[offset+m : offset+m+int(v1)] + value = b.data[offset+m+int(v1) : offset+n] + nShared = int(v0) + return +} + +func (b *block) Release() { + b.bpool.Put(b.data) + b.bpool = nil + b.data = nil +} + +type dir int + +const ( + dirReleased dir = iota - 1 + dirSOI + dirEOI + dirBackward + dirForward +) + +type blockIter struct { + tr *Reader + block *block + blockReleaser util.Releaser + releaser util.Releaser + key, value []byte + offset int + // Previous offset, only filled by Next. + prevOffset int + prevNode []int + prevKeys []byte + restartIndex int + // Iterator direction. + dir dir + // Restart index slice range. + riStart int + riLimit int + // Offset slice range. + offsetStart int + offsetRealStart int + offsetLimit int + // Error. 
+ err error +} + +func (i *blockIter) sErr(err error) { + i.err = err + i.key = nil + i.value = nil + i.prevNode = nil + i.prevKeys = nil +} + +func (i *blockIter) reset() { + if i.dir == dirBackward { + i.prevNode = i.prevNode[:0] + i.prevKeys = i.prevKeys[:0] + } + i.restartIndex = i.riStart + i.offset = i.offsetStart + i.dir = dirSOI + i.key = i.key[:0] + i.value = nil +} + +func (i *blockIter) isFirst() bool { + switch i.dir { + case dirForward: + return i.prevOffset == i.offsetRealStart + case dirBackward: + return len(i.prevNode) == 1 && i.restartIndex == i.riStart + } + return false +} + +func (i *blockIter) isLast() bool { + switch i.dir { + case dirForward, dirBackward: + return i.offset == i.offsetLimit + } + return false +} + +func (i *blockIter) First() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.dir == dirBackward { + i.prevNode = i.prevNode[:0] + i.prevKeys = i.prevKeys[:0] + } + i.dir = dirSOI + return i.Next() +} + +func (i *blockIter) Last() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.dir == dirBackward { + i.prevNode = i.prevNode[:0] + i.prevKeys = i.prevKeys[:0] + } + i.dir = dirEOI + return i.Prev() +} + +func (i *blockIter) Seek(key []byte) bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + ri, offset, err := i.block.seek(i.tr.cmp, i.riStart, i.riLimit, key) + if err != nil { + i.sErr(err) + return false + } + i.restartIndex = ri + i.offset = max(i.offsetStart, offset) + if i.dir == dirSOI || i.dir == dirEOI { + i.dir = dirForward + } + for i.Next() { + if i.tr.cmp.Compare(i.key, key) >= 0 { + return true + } + } + return false +} + +func (i *blockIter) Next() bool { + if i.dir == dirEOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.dir == dirSOI { + i.restartIndex = i.riStart + i.offset = i.offsetStart + } else if i.dir == dirBackward { + i.prevNode = i.prevNode[:0] + i.prevKeys = i.prevKeys[:0] + } + for i.offset < i.offsetRealStart { + key, value, nShared, n, err := i.block.entry(i.offset) + if err != nil { + i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) + return false + } + if n == 0 { + i.dir = dirEOI + return false + } + i.key = append(i.key[:nShared], key...) + i.value = value + i.offset += n + } + if i.offset >= i.offsetLimit { + i.dir = dirEOI + if i.offset != i.offsetLimit { + i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned")) + } + return false + } + key, value, nShared, n, err := i.block.entry(i.offset) + if err != nil { + i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) + return false + } + if n == 0 { + i.dir = dirEOI + return false + } + i.key = append(i.key[:nShared], key...) + i.value = value + i.prevOffset = i.offset + i.offset += n + i.dir = dirForward + return true +} + +func (i *blockIter) Prev() bool { + if i.dir == dirSOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + var ri int + if i.dir == dirForward { + // Change direction. + i.offset = i.prevOffset + if i.offset == i.offsetRealStart { + i.dir = dirSOI + return false + } + ri = i.block.restartIndex(i.restartIndex, i.riLimit, i.offset) + i.dir = dirBackward + } else if i.dir == dirEOI { + // At the end of iterator. 
+ i.restartIndex = i.riLimit + i.offset = i.offsetLimit + if i.offset == i.offsetRealStart { + i.dir = dirSOI + return false + } + ri = i.riLimit - 1 + i.dir = dirBackward + } else if len(i.prevNode) == 1 { + // This is the end of a restart range. + i.offset = i.prevNode[0] + i.prevNode = i.prevNode[:0] + if i.restartIndex == i.riStart { + i.dir = dirSOI + return false + } + i.restartIndex-- + ri = i.restartIndex + } else { + // In the middle of restart range, get from cache. + n := len(i.prevNode) - 3 + node := i.prevNode[n:] + i.prevNode = i.prevNode[:n] + // Get the key. + ko := node[0] + i.key = append(i.key[:0], i.prevKeys[ko:]...) + i.prevKeys = i.prevKeys[:ko] + // Get the value. + vo := node[1] + vl := vo + node[2] + i.value = i.block.data[vo:vl] + i.offset = vl + return true + } + // Build entries cache. + i.key = i.key[:0] + i.value = nil + offset := i.block.restartOffset(ri) + if offset == i.offset { + ri-- + if ri < 0 { + i.dir = dirSOI + return false + } + offset = i.block.restartOffset(ri) + } + i.prevNode = append(i.prevNode, offset) + for { + key, value, nShared, n, err := i.block.entry(offset) + if err != nil { + i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) + return false + } + if offset >= i.offsetRealStart { + if i.value != nil { + // Appends 3 variables: + // 1. Previous keys offset + // 2. Value offset in the data block + // 3. Value length + i.prevNode = append(i.prevNode, len(i.prevKeys), offset-len(i.value), len(i.value)) + i.prevKeys = append(i.prevKeys, i.key...) + } + i.value = value + } + i.key = append(i.key[:nShared], key...) + offset += n + // Stop if target offset reached. + if offset >= i.offset { + if offset != i.offset { + i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned")) + return false + } + + break + } + } + i.restartIndex = ri + i.offset = offset + return true +} + +func (i *blockIter) Key() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.key +} + +func (i *blockIter) Value() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.value +} + +func (i *blockIter) Release() { + if i.dir != dirReleased { + i.tr = nil + i.block = nil + i.prevNode = nil + i.prevKeys = nil + i.key = nil + i.value = nil + i.dir = dirReleased + if i.blockReleaser != nil { + i.blockReleaser.Release() + i.blockReleaser = nil + } + if i.releaser != nil { + i.releaser.Release() + i.releaser = nil + } + } +} + +func (i *blockIter) SetReleaser(releaser util.Releaser) { + if i.dir == dirReleased { + panic(util.ErrReleased) + } + if i.releaser != nil && releaser != nil { + panic(util.ErrHasReleaser) + } + i.releaser = releaser +} + +func (i *blockIter) Valid() bool { + return i.err == nil && (i.dir == dirBackward || i.dir == dirForward) +} + +func (i *blockIter) Error() error { + return i.err +} + +type filterBlock struct { + bpool *util.BufferPool + data []byte + oOffset int + baseLg uint + filtersNum int +} + +func (b *filterBlock) contains(filter filter.Filter, offset uint64, key []byte) bool { + i := int(offset >> b.baseLg) + if i < b.filtersNum { + o := b.data[b.oOffset+i*4:] + n := int(binary.LittleEndian.Uint32(o)) + m := int(binary.LittleEndian.Uint32(o[4:])) + if n < m && m <= b.oOffset { + return filter.Contains(b.data[n:m], key) + } else if n == m { + return false + } + } + return true +} + +func (b *filterBlock) Release() { + b.bpool.Put(b.data) + b.bpool = nil + b.data = nil +} + +type indexIter struct { + *blockIter + tr *Reader + slice *util.Range + // Options + fillCache bool +} + +func (i 
*indexIter) Get() iterator.Iterator { + value := i.Value() + if value == nil { + return nil + } + dataBH, n := decodeBlockHandle(value) + if n == 0 { + return iterator.NewEmptyIterator(i.tr.newErrCorruptedBH(i.tr.indexBH, "bad data block handle")) + } + + var slice *util.Range + if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) { + slice = i.slice + } + return i.tr.getDataIterErr(dataBH, slice, i.tr.verifyChecksum, i.fillCache) +} + +// Reader is a table reader. +type Reader struct { + mu sync.RWMutex + fd storage.FileDesc + reader io.ReaderAt + cache *cache.NamespaceGetter + err error + bpool *util.BufferPool + // Options + o *opt.Options + cmp comparer.Comparer + filter filter.Filter + verifyChecksum bool + + dataEnd int64 + metaBH, indexBH, filterBH blockHandle + indexBlock *block + filterBlock *filterBlock +} + +func (r *Reader) blockKind(bh blockHandle) string { + switch bh.offset { + case r.metaBH.offset: + return "meta-block" + case r.indexBH.offset: + return "index-block" + case r.filterBH.offset: + if r.filterBH.length > 0 { + return "filter-block" + } + } + return "data-block" +} + +func (r *Reader) newErrCorrupted(pos, size int64, kind, reason string) error { + return &errors.ErrCorrupted{Fd: r.fd, Err: &ErrCorrupted{Pos: pos, Size: size, Kind: kind, Reason: reason}} +} + +func (r *Reader) newErrCorruptedBH(bh blockHandle, reason string) error { + return r.newErrCorrupted(int64(bh.offset), int64(bh.length), r.blockKind(bh), reason) +} + +func (r *Reader) fixErrCorruptedBH(bh blockHandle, err error) error { + if cerr, ok := err.(*ErrCorrupted); ok { + cerr.Pos = int64(bh.offset) + cerr.Size = int64(bh.length) + cerr.Kind = r.blockKind(bh) + return &errors.ErrCorrupted{Fd: r.fd, Err: cerr} + } + return err +} + +func (r *Reader) readRawBlock(bh blockHandle, verifyChecksum bool) ([]byte, error) { + data := r.bpool.Get(int(bh.length + blockTrailerLen)) + if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF { + return nil, err + } + + if verifyChecksum { + n := bh.length + 1 + checksum0 := binary.LittleEndian.Uint32(data[n:]) + checksum1 := util.NewCRC(data[:n]).Value() + if checksum0 != checksum1 { + r.bpool.Put(data) + return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("checksum mismatch, want=%#x got=%#x", checksum0, checksum1)) + } + } + + switch data[bh.length] { + case blockTypeNoCompression: + data = data[:bh.length] + case blockTypeSnappyCompression: + decLen, err := snappy.DecodedLen(data[:bh.length]) + if err != nil { + r.bpool.Put(data) + return nil, r.newErrCorruptedBH(bh, err.Error()) + } + decData := r.bpool.Get(decLen) + decData, err = snappy.Decode(decData, data[:bh.length]) + r.bpool.Put(data) + if err != nil { + r.bpool.Put(decData) + return nil, r.newErrCorruptedBH(bh, err.Error()) + } + data = decData + default: + r.bpool.Put(data) + return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("unknown compression type %#x", data[bh.length])) + } + return data, nil +} + +func (r *Reader) readBlock(bh blockHandle, verifyChecksum bool) (*block, error) { + data, err := r.readRawBlock(bh, verifyChecksum) + if err != nil { + return nil, err + } + restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:])) + b := &block{ + bpool: r.bpool, + bh: bh, + data: data, + restartsLen: restartsLen, + restartsOffset: len(data) - (restartsLen+1)*4, + } + return b, nil +} + +func (r *Reader) readBlockCached(bh blockHandle, verifyChecksum, fillCache bool) (*block, util.Releaser, error) { + if r.cache != nil { + var ( + err error + ch 
*cache.Handle + ) + if fillCache { + ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) { + var b *block + b, err = r.readBlock(bh, verifyChecksum) + if err != nil { + return 0, nil + } + return cap(b.data), b + }) + } else { + ch = r.cache.Get(bh.offset, nil) + } + if ch != nil { + b, ok := ch.Value().(*block) + if !ok { + ch.Release() + return nil, nil, errors.New("leveldb/table: inconsistent block type") + } + return b, ch, err + } else if err != nil { + return nil, nil, err + } + } + + b, err := r.readBlock(bh, verifyChecksum) + return b, b, err +} + +func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) { + data, err := r.readRawBlock(bh, true) + if err != nil { + return nil, err + } + n := len(data) + if n < 5 { + return nil, r.newErrCorruptedBH(bh, "too short") + } + m := n - 5 + oOffset := int(binary.LittleEndian.Uint32(data[m:])) + if oOffset > m { + return nil, r.newErrCorruptedBH(bh, "invalid data-offsets offset") + } + b := &filterBlock{ + bpool: r.bpool, + data: data, + oOffset: oOffset, + baseLg: uint(data[n-1]), + filtersNum: (m - oOffset) / 4, + } + return b, nil +} + +func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) { + if r.cache != nil { + var ( + err error + ch *cache.Handle + ) + if fillCache { + ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) { + var b *filterBlock + b, err = r.readFilterBlock(bh) + if err != nil { + return 0, nil + } + return cap(b.data), b + }) + } else { + ch = r.cache.Get(bh.offset, nil) + } + if ch != nil { + b, ok := ch.Value().(*filterBlock) + if !ok { + ch.Release() + return nil, nil, errors.New("leveldb/table: inconsistent block type") + } + return b, ch, err + } else if err != nil { + return nil, nil, err + } + } + + b, err := r.readFilterBlock(bh) + return b, b, err +} + +func (r *Reader) getIndexBlock(fillCache bool) (b *block, rel util.Releaser, err error) { + if r.indexBlock == nil { + return r.readBlockCached(r.indexBH, true, fillCache) + } + return r.indexBlock, util.NoopReleaser{}, nil +} + +func (r *Reader) getFilterBlock(fillCache bool) (*filterBlock, util.Releaser, error) { + if r.filterBlock == nil { + return r.readFilterBlockCached(r.filterBH, fillCache) + } + return r.filterBlock, util.NoopReleaser{}, nil +} + +func (r *Reader) newBlockIter(b *block, bReleaser util.Releaser, slice *util.Range, inclLimit bool) *blockIter { + bi := &blockIter{ + tr: r, + block: b, + blockReleaser: bReleaser, + // Valid key should never be nil. 
+ key: make([]byte, 0), + dir: dirSOI, + riStart: 0, + riLimit: b.restartsLen, + offsetStart: 0, + offsetRealStart: 0, + offsetLimit: b.restartsOffset, + } + if slice != nil { + if slice.Start != nil { + if bi.Seek(slice.Start) { + bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset) + bi.offsetStart = b.restartOffset(bi.riStart) + bi.offsetRealStart = bi.prevOffset + } else { + bi.riStart = b.restartsLen + bi.offsetStart = b.restartsOffset + bi.offsetRealStart = b.restartsOffset + } + } + if slice.Limit != nil { + if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) { + bi.offsetLimit = bi.prevOffset + bi.riLimit = bi.restartIndex + 1 + } + } + bi.reset() + if bi.offsetStart > bi.offsetLimit { + bi.sErr(errors.New("leveldb/table: invalid slice range")) + } + } + return bi +} + +func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator { + b, rel, err := r.readBlockCached(dataBH, verifyChecksum, fillCache) + if err != nil { + return iterator.NewEmptyIterator(err) + } + return r.newBlockIter(b, rel, slice, false) +} + +func (r *Reader) getDataIterErr(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator { + r.mu.RLock() + defer r.mu.RUnlock() + + if r.err != nil { + return iterator.NewEmptyIterator(r.err) + } + + return r.getDataIter(dataBH, slice, verifyChecksum, fillCache) +} + +// NewIterator creates an iterator from the table. +// +// Slice allows slicing the iterator to only contains keys in the given +// range. A nil Range.Start is treated as a key before all keys in the +// table. And a nil Range.Limit is treated as a key after all keys in +// the table. +// +// WARNING: Any slice returned by interator (e.g. slice returned by calling +// Iterator.Key() or Iterator.Key() methods), its content should not be modified +// unless noted otherwise. +// +// The returned iterator is not safe for concurrent use and should be released +// after use. +// +// Also read Iterator documentation of the leveldb/iterator package. +func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + r.mu.RLock() + defer r.mu.RUnlock() + + if r.err != nil { + return iterator.NewEmptyIterator(r.err) + } + + fillCache := !ro.GetDontFillCache() + indexBlock, rel, err := r.getIndexBlock(fillCache) + if err != nil { + return iterator.NewEmptyIterator(err) + } + index := &indexIter{ + blockIter: r.newBlockIter(indexBlock, rel, slice, true), + tr: r, + slice: slice, + fillCache: !ro.GetDontFillCache(), + } + return iterator.NewIndexedIterator(index, opt.GetStrict(r.o, ro, opt.StrictReader)) +} + +func (r *Reader) find(key []byte, filtered bool, ro *opt.ReadOptions, noValue bool) (rkey, value []byte, err error) { + r.mu.RLock() + defer r.mu.RUnlock() + + if r.err != nil { + err = r.err + return + } + + indexBlock, rel, err := r.getIndexBlock(true) + if err != nil { + return + } + defer rel.Release() + + index := r.newBlockIter(indexBlock, nil, nil, true) + defer index.Release() + + if !index.Seek(key) { + if err = index.Error(); err == nil { + err = ErrNotFound + } + return + } + + dataBH, n := decodeBlockHandle(index.Value()) + if n == 0 { + r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") + return nil, nil, r.err + } + + // The filter should only used for exact match. 
+ if filtered && r.filter != nil { + filterBlock, frel, ferr := r.getFilterBlock(true) + if ferr == nil { + if !filterBlock.contains(r.filter, dataBH.offset, key) { + frel.Release() + return nil, nil, ErrNotFound + } + frel.Release() + } else if !errors.IsCorrupted(ferr) { + return nil, nil, ferr + } + } + + data := r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache()) + if !data.Seek(key) { + data.Release() + if err = data.Error(); err != nil { + return + } + + // The nearest greater-than key is the first key of the next block. + if !index.Next() { + if err = index.Error(); err == nil { + err = ErrNotFound + } + return + } + + dataBH, n = decodeBlockHandle(index.Value()) + if n == 0 { + r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") + return nil, nil, r.err + } + + data = r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache()) + if !data.Next() { + data.Release() + if err = data.Error(); err == nil { + err = ErrNotFound + } + return + } + } + + // Key doesn't use block buffer, no need to copy the buffer. + rkey = data.Key() + if !noValue { + if r.bpool == nil { + value = data.Value() + } else { + // Value does use block buffer, and since the buffer will be + // recycled, it need to be copied. + value = append([]byte{}, data.Value()...) + } + } + data.Release() + return +} + +// Find finds key/value pair whose key is greater than or equal to the +// given key. It returns ErrNotFound if the table doesn't contain +// such pair. +// If filtered is true then the nearest 'block' will be checked against +// 'filter data' (if present) and will immediately return ErrNotFound if +// 'filter data' indicates that such pair doesn't exist. +// +// The caller may modify the contents of the returned slice as it is its +// own copy. +// It is safe to modify the contents of the argument after Find returns. +func (r *Reader) Find(key []byte, filtered bool, ro *opt.ReadOptions) (rkey, value []byte, err error) { + return r.find(key, filtered, ro, false) +} + +// FindKey finds key that is greater than or equal to the given key. +// It returns ErrNotFound if the table doesn't contain such key. +// If filtered is true then the nearest 'block' will be checked against +// 'filter data' (if present) and will immediately return ErrNotFound if +// 'filter data' indicates that such key doesn't exist. +// +// The caller may modify the contents of the returned slice as it is its +// own copy. +// It is safe to modify the contents of the argument after Find returns. +func (r *Reader) FindKey(key []byte, filtered bool, ro *opt.ReadOptions) (rkey []byte, err error) { + rkey, _, err = r.find(key, filtered, ro, true) + return +} + +// Get gets the value for the given key. It returns errors.ErrNotFound +// if the table does not contain the key. +// +// The caller may modify the contents of the returned slice as it is its +// own copy. +// It is safe to modify the contents of the argument after Find returns. +func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { + r.mu.RLock() + defer r.mu.RUnlock() + + if r.err != nil { + err = r.err + return + } + + rkey, value, err := r.find(key, false, ro, false) + if err == nil && r.cmp.Compare(rkey, key) != 0 { + value = nil + err = ErrNotFound + } + return +} + +// OffsetOf returns approximate offset for the given key. +// +// It is safe to modify the contents of the argument after Get returns. 
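A round-trip sketch of the Find semantics documented above, using the exported Writer and Reader from this vendored package: the FileDesc values are arbitrary, and the nil cache, buffer pool, options, and read options all rely on the nil-safe defaults that the surrounding code documents.

    package main

    import (
    	"bytes"
    	"fmt"

    	"github.com/syndtr/goleveldb/leveldb/storage"
    	"github.com/syndtr/goleveldb/leveldb/table"
    )

    func main() {
    	// Write a two-entry table into memory.
    	var buf bytes.Buffer
    	w := table.NewWriter(&buf, nil)
    	for _, k := range []string{"k1", "k3"} {
    		if err := w.Append([]byte(k), []byte("v-"+k)); err != nil {
    			panic(err)
    		}
    	}
    	if err := w.Close(); err != nil {
    		panic(err)
    	}

    	// Read it back. Find returns the first pair whose key is >= the
    	// argument, so probing "k2" lands on "k3".
    	data := buf.Bytes()
    	fd := storage.FileDesc{Type: storage.TypeTable, Num: 1} // arbitrary
    	r, err := table.NewReader(bytes.NewReader(data), int64(len(data)), fd, nil, nil, nil)
    	if err != nil {
    		panic(err)
    	}
    	defer r.Release()

    	rkey, rvalue, err := r.Find([]byte("k2"), true, nil)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("%s => %s\n", rkey, rvalue) // k3 => v-k3
    }
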
+func (r *Reader) OffsetOf(key []byte) (offset int64, err error) { + r.mu.RLock() + defer r.mu.RUnlock() + + if r.err != nil { + err = r.err + return + } + + indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true) + if err != nil { + return + } + defer rel.Release() + + index := r.newBlockIter(indexBlock, nil, nil, true) + defer index.Release() + if index.Seek(key) { + dataBH, n := decodeBlockHandle(index.Value()) + if n == 0 { + r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") + return + } + offset = int64(dataBH.offset) + return + } + err = index.Error() + if err == nil { + offset = r.dataEnd + } + return +} + +// Release implements util.Releaser. +// It also close the file if it is an io.Closer. +func (r *Reader) Release() { + r.mu.Lock() + defer r.mu.Unlock() + + if closer, ok := r.reader.(io.Closer); ok { + closer.Close() + } + if r.indexBlock != nil { + r.indexBlock.Release() + r.indexBlock = nil + } + if r.filterBlock != nil { + r.filterBlock.Release() + r.filterBlock = nil + } + r.reader = nil + r.cache = nil + r.bpool = nil + r.err = ErrReaderReleased +} + +// NewReader creates a new initialized table reader for the file. +// The fi, cache and bpool is optional and can be nil. +// +// The returned table reader instance is safe for concurrent use. +func NewReader(f io.ReaderAt, size int64, fd storage.FileDesc, cache *cache.NamespaceGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) { + if f == nil { + return nil, errors.New("leveldb/table: nil file") + } + + r := &Reader{ + fd: fd, + reader: f, + cache: cache, + bpool: bpool, + o: o, + cmp: o.GetComparer(), + verifyChecksum: o.GetStrict(opt.StrictBlockChecksum), + } + + if size < footerLen { + r.err = r.newErrCorrupted(0, size, "table", "too small") + return r, nil + } + + footerPos := size - footerLen + var footer [footerLen]byte + if _, err := r.reader.ReadAt(footer[:], footerPos); err != nil && err != io.EOF { + return nil, err + } + if string(footer[footerLen-len(magic):footerLen]) != magic { + r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad magic number") + return r, nil + } + + var n int + // Decode the metaindex block handle. + r.metaBH, n = decodeBlockHandle(footer[:]) + if n == 0 { + r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad metaindex block handle") + return r, nil + } + + // Decode the index block handle. + r.indexBH, n = decodeBlockHandle(footer[n:]) + if n == 0 { + r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad index block handle") + return r, nil + } + + // Read metaindex block. + metaBlock, err := r.readBlock(r.metaBH, true) + if err != nil { + if errors.IsCorrupted(err) { + r.err = err + return r, nil + } + return nil, err + } + + // Set data end. + r.dataEnd = int64(r.metaBH.offset) + + // Read metaindex. + metaIter := r.newBlockIter(metaBlock, nil, nil, true) + for metaIter.Next() { + key := string(metaIter.Key()) + if !strings.HasPrefix(key, "filter.") { + continue + } + fn := key[7:] + if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn { + r.filter = f0 + } else { + for _, f0 := range o.GetAltFilters() { + if f0.Name() == fn { + r.filter = f0 + break + } + } + } + if r.filter != nil { + filterBH, n := decodeBlockHandle(metaIter.Value()) + if n == 0 { + continue + } + r.filterBH = filterBH + // Update data end. + r.dataEnd = int64(filterBH.offset) + break + } + } + metaIter.Release() + metaBlock.Release() + + // Cache index and filter block locally, since we don't have global cache. 
+ if cache == nil { + r.indexBlock, err = r.readBlock(r.indexBH, true) + if err != nil { + if errors.IsCorrupted(err) { + r.err = err + return r, nil + } + return nil, err + } + if r.filter != nil { + r.filterBlock, err = r.readFilterBlock(r.filterBH) + if err != nil { + if !errors.IsCorrupted(err) { + return nil, err + } + + // Don't use filter then. + r.filter = nil + } + } + } + + return r, nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go new file mode 100644 index 0000000..beacdc1 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go @@ -0,0 +1,177 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package table allows read and write sorted key/value. +package table + +import ( + "encoding/binary" +) + +/* +Table: + +Table is consist of one or more data blocks, an optional filter block +a metaindex block, an index block and a table footer. Metaindex block +is a special block used to keep parameters of the table, such as filter +block name and its block handle. Index block is a special block used to +keep record of data blocks offset and length, index block use one as +restart interval. The key used by index block are the last key of preceding +block, shorter separator of adjacent blocks or shorter successor of the +last key of the last block. Filter block is an optional block contains +sequence of filter data generated by a filter generator. + +Table data structure: + + optional + / + +--------------+--------------+--------------+------+-------+-----------------+-------------+--------+ + | data block 1 | ... | data block n | filter block | metaindex block | index block | footer | + +--------------+--------------+--------------+--------------+-----------------+-------------+--------+ + + Each block followed by a 5-bytes trailer contains compression type and checksum. + +Table block trailer: + + +---------------------------+-------------------+ + | compression type (1-byte) | checksum (4-byte) | + +---------------------------+-------------------+ + + The checksum is a CRC-32 computed using Castagnoli's polynomial. Compression + type also included in the checksum. + +Table footer: + + +------------------- 40-bytes -------------------+ + / \ + +------------------------+--------------------+------+-----------------+ + | metaindex block handle / index block handle / ---- | magic (8-bytes) | + +------------------------+--------------------+------+-----------------+ + + The magic are first 64-bit of SHA-1 sum of "http://code.google.com/p/leveldb/". + +NOTE: All fixed-length integer are little-endian. +*/ + +/* +Block: + +Block is consist of one or more key/value entries and a block trailer. +Block entry shares key prefix with its preceding key until a restart +point reached. A block should contains at least one restart point. +First restart point are always zero. + +Block data structure: + + + restart point + restart point (depends on restart interval) + / / + +---------------+---------------+---------------+---------------+---------+ + | block entry 1 | block entry 2 | ... 
| block entry n | trailer | + +---------------+---------------+---------------+---------------+---------+ + +Key/value entry: + + +---- key len ----+ + / \ + +-------+---------+-----------+---------+--------------------+--------------+----------------+ + | shared (varint) | not shared (varint) | value len (varint) | key (varlen) | value (varlen) | + +-----------------+---------------------+--------------------+--------------+----------------+ + + Block entry shares key prefix with its preceding key: + Conditions: + restart_interval=2 + entry one : key=deck,value=v1 + entry two : key=dock,value=v2 + entry three: key=duck,value=v3 + The entries will be encoded as follow: + + + restart point (offset=0) + restart point (offset=16) + / / + +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+ + | 0 | 4 | 2 | "deck" | "v1" | 1 | 3 | 2 | "ock" | "v2" | 0 | 4 | 2 | "duck" | "v3" | + +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+ + \ / \ / \ / + +----------- entry one -----------+ +----------- entry two ----------+ +---------- entry three ----------+ + + The block trailer will contains two restart points: + + +------------+-----------+--------+ + | 0 | 16 | 2 | + +------------+-----------+---+----+ + \ / \ + +-- restart points --+ + restart points length + +Block trailer: + + +-- 4-bytes --+ + / \ + +-----------------+-----------------+-----------------+------------------------------+ + | restart point 1 | .... | restart point n | restart points len (4-bytes) | + +-----------------+-----------------+-----------------+------------------------------+ + + +NOTE: All fixed-length integer are little-endian. +*/ + +/* +Filter block: + +Filter block consist of one or more filter data and a filter block trailer. +The trailer contains filter data offsets, a trailer offset and a 1-byte base Lg. + +Filter block data structure: + + + offset 1 + offset 2 + offset n + trailer offset + / / / / + +---------------+---------------+---------------+---------+ + | filter data 1 | ... | filter data n | trailer | + +---------------+---------------+---------------+---------+ + +Filter block trailer: + + +- 4-bytes -+ + / \ + +---------------+---------------+---------------+-------------------------------+------------------+ + | data 1 offset | .... | data n offset | data-offsets offset (4-bytes) | base Lg (1-byte) | + +-------------- +---------------+---------------+-------------------------------+------------------+ + + +NOTE: All fixed-length integer are little-endian. +*/ + +const ( + blockTrailerLen = 5 + footerLen = 48 + + magic = "\x57\xfb\x80\x8b\x24\x75\x47\xdb" + + // The block type gives the per-block compression format. + // These constants are part of the file format and should not be changed. 
+ blockTypeNoCompression = 0 + blockTypeSnappyCompression = 1 + + // Generate new filter every 2KB of data + filterBaseLg = 11 + filterBase = 1 << filterBaseLg +) + +type blockHandle struct { + offset, length uint64 +} + +func decodeBlockHandle(src []byte) (blockHandle, int) { + offset, n := binary.Uvarint(src) + length, m := binary.Uvarint(src[n:]) + if n == 0 || m == 0 { + return blockHandle{}, 0 + } + return blockHandle{offset, length}, n + m +} + +func encodeBlockHandle(dst []byte, b blockHandle) int { + n := binary.PutUvarint(dst, b.offset) + m := binary.PutUvarint(dst[n:], b.length) + return n + m +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go new file mode 100644 index 0000000..b96b271 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go @@ -0,0 +1,375 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package table + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/golang/snappy" + + "github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/syndtr/goleveldb/leveldb/filter" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" +) + +func sharedPrefixLen(a, b []byte) int { + i, n := 0, len(a) + if n > len(b) { + n = len(b) + } + for i < n && a[i] == b[i] { + i++ + } + return i +} + +type blockWriter struct { + restartInterval int + buf util.Buffer + nEntries int + prevKey []byte + restarts []uint32 + scratch []byte +} + +func (w *blockWriter) append(key, value []byte) { + nShared := 0 + if w.nEntries%w.restartInterval == 0 { + w.restarts = append(w.restarts, uint32(w.buf.Len())) + } else { + nShared = sharedPrefixLen(w.prevKey, key) + } + n := binary.PutUvarint(w.scratch[0:], uint64(nShared)) + n += binary.PutUvarint(w.scratch[n:], uint64(len(key)-nShared)) + n += binary.PutUvarint(w.scratch[n:], uint64(len(value))) + w.buf.Write(w.scratch[:n]) + w.buf.Write(key[nShared:]) + w.buf.Write(value) + w.prevKey = append(w.prevKey[:0], key...) + w.nEntries++ +} + +func (w *blockWriter) finish() { + // Write restarts entry. + if w.nEntries == 0 { + // Must have at least one restart entry. + w.restarts = append(w.restarts, 0) + } + w.restarts = append(w.restarts, uint32(len(w.restarts))) + for _, x := range w.restarts { + buf4 := w.buf.Alloc(4) + binary.LittleEndian.PutUint32(buf4, x) + } +} + +func (w *blockWriter) reset() { + w.buf.Reset() + w.nEntries = 0 + w.restarts = w.restarts[:0] +} + +func (w *blockWriter) bytesLen() int { + restartsLen := len(w.restarts) + if restartsLen == 0 { + restartsLen = 1 + } + return w.buf.Len() + 4*restartsLen + 4 +} + +type filterWriter struct { + generator filter.FilterGenerator + buf util.Buffer + nKeys int + offsets []uint32 +} + +func (w *filterWriter) add(key []byte) { + if w.generator == nil { + return + } + w.generator.Add(key) + w.nKeys++ +} + +func (w *filterWriter) flush(offset uint64) { + if w.generator == nil { + return + } + for x := int(offset / filterBase); x > len(w.offsets); { + w.generate() + } +} + +func (w *filterWriter) finish() { + if w.generator == nil { + return + } + // Generate last keys. 
+ + if w.nKeys > 0 { + w.generate() + } + w.offsets = append(w.offsets, uint32(w.buf.Len())) + for _, x := range w.offsets { + buf4 := w.buf.Alloc(4) + binary.LittleEndian.PutUint32(buf4, x) + } + w.buf.WriteByte(filterBaseLg) +} + +func (w *filterWriter) generate() { + // Record offset. + w.offsets = append(w.offsets, uint32(w.buf.Len())) + // Generate filters. + if w.nKeys > 0 { + w.generator.Generate(&w.buf) + w.nKeys = 0 + } +} + +// Writer is a table writer. +type Writer struct { + writer io.Writer + err error + // Options + cmp comparer.Comparer + filter filter.Filter + compression opt.Compression + blockSize int + + dataBlock blockWriter + indexBlock blockWriter + filterBlock filterWriter + pendingBH blockHandle + offset uint64 + nEntries int + // Scratch allocated enough for 5 uvarint. Block writer should not use + // first 20-bytes since it will be used to encode block handle, which + // then passed to the block writer itself. + scratch [50]byte + comparerScratch []byte + compressionScratch []byte +} + +func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) { + // Compress the buffer if necessary. + var b []byte + if compression == opt.SnappyCompression { + // Allocate scratch enough for compression and block trailer. + if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n { + w.compressionScratch = make([]byte, n) + } + compressed := snappy.Encode(w.compressionScratch, buf.Bytes()) + n := len(compressed) + b = compressed[:n+blockTrailerLen] + b[n] = blockTypeSnappyCompression + } else { + tmp := buf.Alloc(blockTrailerLen) + tmp[0] = blockTypeNoCompression + b = buf.Bytes() + } + + // Calculate the checksum. + n := len(b) - 4 + checksum := util.NewCRC(b[:n]).Value() + binary.LittleEndian.PutUint32(b[n:], checksum) + + // Write the buffer to the file. + _, err = w.writer.Write(b) + if err != nil { + return + } + bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)} + w.offset += uint64(len(b)) + return +} + +func (w *Writer) flushPendingBH(key []byte) { + if w.pendingBH.length == 0 { + return + } + var separator []byte + if len(key) == 0 { + separator = w.cmp.Successor(w.comparerScratch[:0], w.dataBlock.prevKey) + } else { + separator = w.cmp.Separator(w.comparerScratch[:0], w.dataBlock.prevKey, key) + } + if separator == nil { + separator = w.dataBlock.prevKey + } else { + w.comparerScratch = separator + } + n := encodeBlockHandle(w.scratch[:20], w.pendingBH) + // Append the block handle to the index block. + w.indexBlock.append(separator, w.scratch[:n]) + // Reset prev key of the data block. + w.dataBlock.prevKey = w.dataBlock.prevKey[:0] + // Clear pending block handle. + w.pendingBH = blockHandle{} +} + +func (w *Writer) finishBlock() error { + w.dataBlock.finish() + bh, err := w.writeBlock(&w.dataBlock.buf, w.compression) + if err != nil { + return err + } + w.pendingBH = bh + // Reset the data block. + w.dataBlock.reset() + // Flush the filter block. + w.filterBlock.flush(w.offset) + return nil +} + +// Append appends key/value pair to the table. The keys passed must +// be in increasing order. +// +// It is safe to modify the contents of the arguments after Append returns. 
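The pendingBH bookkeeping above, like the table footer described in table.go, rests on the varint block-handle layout used by encodeBlockHandle and decodeBlockHandle: an offset uvarint followed by a length uvarint. A self-contained round-trip of that layout using only encoding/binary:

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    func main() {
    	// A block handle is two uvarints: offset, then length. This is
    	// the same layout encode/decodeBlockHandle use, both for index
    	// entries and for the two handles in the footer.
    	var scratch [2 * binary.MaxVarintLen64]byte
    	offset, length := uint64(8), uint64(419)

    	n := binary.PutUvarint(scratch[:], offset)
    	n += binary.PutUvarint(scratch[n:], length)
    	fmt.Printf("encoded: % x (%d bytes)\n", scratch[:n], n) // 08 a3 03 (3 bytes)

    	gotOffset, n1 := binary.Uvarint(scratch[:])
    	gotLength, n2 := binary.Uvarint(scratch[n1:])
    	fmt.Println(gotOffset, gotLength, n1+n2 == n) // 8 419 true
    }
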
+func (w *Writer) Append(key, value []byte) error { + if w.err != nil { + return w.err + } + if w.nEntries > 0 && w.cmp.Compare(w.dataBlock.prevKey, key) >= 0 { + w.err = fmt.Errorf("leveldb/table: Writer: keys are not in increasing order: %q, %q", w.dataBlock.prevKey, key) + return w.err + } + + w.flushPendingBH(key) + // Append key/value pair to the data block. + w.dataBlock.append(key, value) + // Add key to the filter block. + w.filterBlock.add(key) + + // Finish the data block if block size target reached. + if w.dataBlock.bytesLen() >= w.blockSize { + if err := w.finishBlock(); err != nil { + w.err = err + return w.err + } + } + w.nEntries++ + return nil +} + +// BlocksLen returns number of blocks written so far. +func (w *Writer) BlocksLen() int { + n := w.indexBlock.nEntries + if w.pendingBH.length > 0 { + // Includes the pending block. + n++ + } + return n +} + +// EntriesLen returns number of entries added so far. +func (w *Writer) EntriesLen() int { + return w.nEntries +} + +// BytesLen returns number of bytes written so far. +func (w *Writer) BytesLen() int { + return int(w.offset) +} + +// Close will finalize the table. Calling Append is not possible +// after Close, but calling BlocksLen, EntriesLen and BytesLen +// is still possible. +func (w *Writer) Close() error { + if w.err != nil { + return w.err + } + + // Write the last data block. Or empty data block if there + // aren't any data blocks at all. + if w.dataBlock.nEntries > 0 || w.nEntries == 0 { + if err := w.finishBlock(); err != nil { + w.err = err + return w.err + } + } + w.flushPendingBH(nil) + + // Write the filter block. + var filterBH blockHandle + w.filterBlock.finish() + if buf := &w.filterBlock.buf; buf.Len() > 0 { + filterBH, w.err = w.writeBlock(buf, opt.NoCompression) + if w.err != nil { + return w.err + } + } + + // Write the metaindex block. + if filterBH.length > 0 { + key := []byte("filter." + w.filter.Name()) + n := encodeBlockHandle(w.scratch[:20], filterBH) + w.dataBlock.append(key, w.scratch[:n]) + } + w.dataBlock.finish() + metaindexBH, err := w.writeBlock(&w.dataBlock.buf, w.compression) + if err != nil { + w.err = err + return w.err + } + + // Write the index block. + w.indexBlock.finish() + indexBH, err := w.writeBlock(&w.indexBlock.buf, w.compression) + if err != nil { + w.err = err + return w.err + } + + // Write the table footer. + footer := w.scratch[:footerLen] + for i := range footer { + footer[i] = 0 + } + n := encodeBlockHandle(footer, metaindexBH) + encodeBlockHandle(footer[n:], indexBH) + copy(footer[footerLen-len(magic):], magic) + if _, err := w.writer.Write(footer); err != nil { + w.err = err + return w.err + } + w.offset += footerLen + + w.err = errors.New("leveldb/table: writer is closed") + return nil +} + +// NewWriter creates a new initialized table writer for the file. +// +// Table writer is not safe for concurrent use. +func NewWriter(f io.Writer, o *opt.Options) *Writer { + w := &Writer{ + writer: f, + cmp: o.GetComparer(), + filter: o.GetFilter(), + compression: o.GetCompression(), + blockSize: o.GetBlockSize(), + comparerScratch: make([]byte, 0), + } + // data block + w.dataBlock.restartInterval = o.GetBlockRestartInterval() + // The first 20-bytes are used for encoding block handle. 
+ w.dataBlock.scratch = w.scratch[20:] + // index block + w.indexBlock.restartInterval = 1 + w.indexBlock.scratch = w.scratch[20:] + // filter block + if w.filter != nil { + w.filterBlock.generator = w.filter.NewGenerator() + w.filterBlock.flush(0) + } + return w +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util.go b/vendor/github.com/syndtr/goleveldb/leveldb/util.go new file mode 100644 index 0000000..0e2b519 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util.go @@ -0,0 +1,98 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "fmt" + "sort" + + "github.com/syndtr/goleveldb/leveldb/storage" +) + +func shorten(str string) string { + if len(str) <= 8 { + return str + } + return str[:3] + ".." + str[len(str)-3:] +} + +var bunits = [...]string{"", "Ki", "Mi", "Gi", "Ti"} + +func shortenb(bytes int) string { + i := 0 + for ; bytes > 1024 && i < 4; i++ { + bytes /= 1024 + } + return fmt.Sprintf("%d%sB", bytes, bunits[i]) +} + +func sshortenb(bytes int) string { + if bytes == 0 { + return "~" + } + sign := "+" + if bytes < 0 { + sign = "-" + bytes *= -1 + } + i := 0 + for ; bytes > 1024 && i < 4; i++ { + bytes /= 1024 + } + return fmt.Sprintf("%s%d%sB", sign, bytes, bunits[i]) +} + +func sint(x int) string { + if x == 0 { + return "~" + } + sign := "+" + if x < 0 { + sign = "-" + x *= -1 + } + return fmt.Sprintf("%s%d", sign, x) +} + +func minInt(a, b int) int { + if a < b { + return a + } + return b +} + +func maxInt(a, b int) int { + if a > b { + return a + } + return b +} + +type fdSorter []storage.FileDesc + +func (p fdSorter) Len() int { + return len(p) +} + +func (p fdSorter) Less(i, j int) bool { + return p[i].Num < p[j].Num +} + +func (p fdSorter) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +func sortFds(fds []storage.FileDesc) { + sort.Sort(fdSorter(fds)) +} + +func ensureBuffer(b []byte, n int) []byte { + if cap(b) < n { + return make([]byte, n) + } + return b[:n] +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go new file mode 100644 index 0000000..21de242 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go @@ -0,0 +1,293 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package util + +// This a copy of Go std bytes.Buffer with some modification +// and some features stripped. + +import ( + "bytes" + "io" +) + +// A Buffer is a variable-sized buffer of bytes with Read and Write methods. +// The zero value for Buffer is an empty buffer ready to use. +type Buffer struct { + buf []byte // contents are the bytes buf[off : len(buf)] + off int // read at &buf[off], write at &buf[len(buf)] + bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation. +} + +// Bytes returns a slice of the contents of the unread portion of the buffer; +// len(b.Bytes()) == b.Len(). If the caller changes the contents of the +// returned slice, the contents of the buffer will change provided there +// are no intervening method calls on the Buffer. +func (b *Buffer) Bytes() []byte { return b.buf[b.off:] } + +// String returns the contents of the unread portion of the buffer +// as a string. If the Buffer is a nil pointer, it returns "". 
+func (b *Buffer) String() string { + if b == nil { + // Special case, useful in debugging. + return "" + } + return string(b.buf[b.off:]) +} + +// Len returns the number of bytes of the unread portion of the buffer; +// b.Len() == len(b.Bytes()). +func (b *Buffer) Len() int { return len(b.buf) - b.off } + +// Truncate discards all but the first n unread bytes from the buffer. +// It panics if n is negative or greater than the length of the buffer. +func (b *Buffer) Truncate(n int) { + switch { + case n < 0 || n > b.Len(): + panic("leveldb/util.Buffer: truncation out of range") + case n == 0: + // Reuse buffer space. + b.off = 0 + } + b.buf = b.buf[0 : b.off+n] +} + +// Reset resets the buffer so it has no content. +// b.Reset() is the same as b.Truncate(0). +func (b *Buffer) Reset() { b.Truncate(0) } + +// grow grows the buffer to guarantee space for n more bytes. +// It returns the index where bytes should be written. +// If the buffer can't grow it will panic with bytes.ErrTooLarge. +func (b *Buffer) grow(n int) int { + m := b.Len() + // If buffer is empty, reset to recover space. + if m == 0 && b.off != 0 { + b.Truncate(0) + } + if len(b.buf)+n > cap(b.buf) { + var buf []byte + if b.buf == nil && n <= len(b.bootstrap) { + buf = b.bootstrap[0:] + } else if m+n <= cap(b.buf)/2 { + // We can slide things down instead of allocating a new + // slice. We only need m+n <= cap(b.buf) to slide, but + // we instead let capacity get twice as large so we + // don't spend all our time copying. + copy(b.buf[:], b.buf[b.off:]) + buf = b.buf[:m] + } else { + // not enough space anywhere + buf = makeSlice(2*cap(b.buf) + n) + copy(buf, b.buf[b.off:]) + } + b.buf = buf + b.off = 0 + } + b.buf = b.buf[0 : b.off+m+n] + return b.off + m +} + +// Alloc allocs n bytes of slice from the buffer, growing the buffer as +// needed. If n is negative, Alloc will panic. +// If the buffer can't grow it will panic with bytes.ErrTooLarge. +func (b *Buffer) Alloc(n int) []byte { + if n < 0 { + panic("leveldb/util.Buffer.Alloc: negative count") + } + m := b.grow(n) + return b.buf[m:] +} + +// Grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After Grow(n), at least n bytes can be written to the +// buffer without another allocation. +// If n is negative, Grow will panic. +// If the buffer can't grow it will panic with bytes.ErrTooLarge. +func (b *Buffer) Grow(n int) { + if n < 0 { + panic("leveldb/util.Buffer.Grow: negative count") + } + m := b.grow(n) + b.buf = b.buf[0:m] +} + +// Write appends the contents of p to the buffer, growing the buffer as +// needed. The return value n is the length of p; err is always nil. If the +// buffer becomes too large, Write will panic with bytes.ErrTooLarge. +func (b *Buffer) Write(p []byte) (n int, err error) { + m := b.grow(len(p)) + return copy(b.buf[m:], p), nil +} + +// MinRead is the minimum slice size passed to a Read call by +// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond +// what is required to hold the contents of r, ReadFrom will not grow the +// underlying buffer. +const MinRead = 512 + +// ReadFrom reads data from r until EOF and appends it to the buffer, growing +// the buffer as needed. The return value n is the number of bytes read. Any +// error except io.EOF encountered during the read is also returned. If the +// buffer becomes too large, ReadFrom will panic with bytes.ErrTooLarge. +func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) { + // If buffer is empty, reset to recover space. 
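+	// Growth here is tuned for streaming reads: first try to reclaim the
+	// already-read prefix by sliding it away; only when sliding cannot
+	// free MinRead bytes is the capacity doubled.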
+ if b.off >= len(b.buf) { + b.Truncate(0) + } + for { + if free := cap(b.buf) - len(b.buf); free < MinRead { + // not enough space at end + newBuf := b.buf + if b.off+free < MinRead { + // not enough space using beginning of buffer; + // double buffer capacity + newBuf = makeSlice(2*cap(b.buf) + MinRead) + } + copy(newBuf, b.buf[b.off:]) + b.buf = newBuf[:len(b.buf)-b.off] + b.off = 0 + } + m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]) + b.buf = b.buf[0 : len(b.buf)+m] + n += int64(m) + if e == io.EOF { + break + } + if e != nil { + return n, e + } + } + return n, nil // err is EOF, so return nil explicitly +} + +// makeSlice allocates a slice of size n. If the allocation fails, it panics +// with bytes.ErrTooLarge. +func makeSlice(n int) []byte { + // If the make fails, give a known error. + defer func() { + if recover() != nil { + panic(bytes.ErrTooLarge) + } + }() + return make([]byte, n) +} + +// WriteTo writes data to w until the buffer is drained or an error occurs. +// The return value n is the number of bytes written; it always fits into an +// int, but it is int64 to match the io.WriterTo interface. Any error +// encountered during the write is also returned. +func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) { + if b.off < len(b.buf) { + nBytes := b.Len() + m, e := w.Write(b.buf[b.off:]) + if m > nBytes { + panic("leveldb/util.Buffer.WriteTo: invalid Write count") + } + b.off += m + n = int64(m) + if e != nil { + return n, e + } + // all bytes should have been written, by definition of + // Write method in io.Writer + if m != nBytes { + return n, io.ErrShortWrite + } + } + // Buffer is now empty; reset. + b.Truncate(0) + return +} + +// WriteByte appends the byte c to the buffer, growing the buffer as needed. +// The returned error is always nil, but is included to match bufio.Writer's +// WriteByte. If the buffer becomes too large, WriteByte will panic with +// bytes.ErrTooLarge. +func (b *Buffer) WriteByte(c byte) error { + m := b.grow(1) + b.buf[m] = c + return nil +} + +// Read reads the next len(p) bytes from the buffer or until the buffer +// is drained. The return value n is the number of bytes read. If the +// buffer has no data to return, err is io.EOF (unless len(p) is zero); +// otherwise it is nil. +func (b *Buffer) Read(p []byte) (n int, err error) { + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + if len(p) == 0 { + return + } + return 0, io.EOF + } + n = copy(p, b.buf[b.off:]) + b.off += n + return +} + +// Next returns a slice containing the next n bytes from the buffer, +// advancing the buffer as if the bytes had been returned by Read. +// If there are fewer than n bytes in the buffer, Next returns the entire buffer. +// The slice is only valid until the next call to a read or write method. +func (b *Buffer) Next(n int) []byte { + m := b.Len() + if n > m { + n = m + } + data := b.buf[b.off : b.off+n] + b.off += n + return data +} + +// ReadByte reads and returns the next byte from the buffer. +// If no byte is available, it returns error io.EOF. +func (b *Buffer) ReadByte() (c byte, err error) { + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + return 0, io.EOF + } + c = b.buf[b.off] + b.off++ + return c, nil +} + +// ReadBytes reads until the first occurrence of delim in the input, +// returning a slice containing the data up to and including the delimiter. 
+// If ReadBytes encounters an error before finding a delimiter, +// it returns the data read before the error and the error itself (often io.EOF). +// ReadBytes returns err != nil if and only if the returned data does not end in +// delim. +func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) { + slice, err := b.readSlice(delim) + // return a copy of slice. The buffer's backing array may + // be overwritten by later calls. + line = append(line, slice...) + return +} + +// readSlice is like ReadBytes but returns a reference to internal buffer data. +func (b *Buffer) readSlice(delim byte) (line []byte, err error) { + i := bytes.IndexByte(b.buf[b.off:], delim) + end := b.off + i + 1 + if i < 0 { + end = len(b.buf) + err = io.EOF + } + line = b.buf[b.off:end] + b.off = end + return line, err +} + +// NewBuffer creates and initializes a new Buffer using buf as its initial +// contents. It is intended to prepare a Buffer to read existing data. It +// can also be used to size the internal buffer for writing. To do that, +// buf should have the desired capacity but a length of zero. +// +// In most cases, new(Buffer) (or just declaring a Buffer variable) is +// sufficient to initialize a Buffer. +func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} } diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go new file mode 100644 index 0000000..2f3db97 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go @@ -0,0 +1,239 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package util + +import ( + "fmt" + "sync" + "sync/atomic" + "time" +) + +type buffer struct { + b []byte + miss int +} + +// BufferPool is a 'buffer pool'. +type BufferPool struct { + pool [6]chan []byte + size [5]uint32 + sizeMiss [5]uint32 + sizeHalf [5]uint32 + baseline [4]int + baseline0 int + + mu sync.RWMutex + closed bool + closeC chan struct{} + + get uint32 + put uint32 + half uint32 + less uint32 + equal uint32 + greater uint32 + miss uint32 +} + +func (p *BufferPool) poolNum(n int) int { + if n <= p.baseline0 && n > p.baseline0/2 { + return 0 + } + for i, x := range p.baseline { + if n <= x { + return i + 1 + } + } + return len(p.baseline) + 1 +} + +// Get returns buffer with length of n. +func (p *BufferPool) Get(n int) []byte { + if p == nil { + return make([]byte, n) + } + + p.mu.RLock() + defer p.mu.RUnlock() + + if p.closed { + return make([]byte, n) + } + + atomic.AddUint32(&p.get, 1) + + poolNum := p.poolNum(n) + pool := p.pool[poolNum] + if poolNum == 0 { + // Fast path. 
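+		// Reuse a pooled buffer only when n <= cap(b) < 2*n; one at least
+		// twice as large goes back to the pool (counted as 'half'), one
+		// too small is dropped, and a fresh slice is allocated instead.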
+ select { + case b := <-pool: + switch { + case cap(b) > n: + if cap(b)-n >= n { + atomic.AddUint32(&p.half, 1) + select { + case pool <- b: + default: + } + return make([]byte, n) + } else { + atomic.AddUint32(&p.less, 1) + return b[:n] + } + case cap(b) == n: + atomic.AddUint32(&p.equal, 1) + return b[:n] + default: + atomic.AddUint32(&p.greater, 1) + } + default: + atomic.AddUint32(&p.miss, 1) + } + + return make([]byte, n, p.baseline0) + } else { + sizePtr := &p.size[poolNum-1] + + select { + case b := <-pool: + switch { + case cap(b) > n: + if cap(b)-n >= n { + atomic.AddUint32(&p.half, 1) + sizeHalfPtr := &p.sizeHalf[poolNum-1] + if atomic.AddUint32(sizeHalfPtr, 1) == 20 { + atomic.StoreUint32(sizePtr, uint32(cap(b)/2)) + atomic.StoreUint32(sizeHalfPtr, 0) + } else { + select { + case pool <- b: + default: + } + } + return make([]byte, n) + } else { + atomic.AddUint32(&p.less, 1) + return b[:n] + } + case cap(b) == n: + atomic.AddUint32(&p.equal, 1) + return b[:n] + default: + atomic.AddUint32(&p.greater, 1) + if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) { + select { + case pool <- b: + default: + } + } + } + default: + atomic.AddUint32(&p.miss, 1) + } + + if size := atomic.LoadUint32(sizePtr); uint32(n) > size { + if size == 0 { + atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n)) + } else { + sizeMissPtr := &p.sizeMiss[poolNum-1] + if atomic.AddUint32(sizeMissPtr, 1) == 20 { + atomic.StoreUint32(sizePtr, uint32(n)) + atomic.StoreUint32(sizeMissPtr, 0) + } + } + return make([]byte, n) + } else { + return make([]byte, n, size) + } + } +} + +// Put adds given buffer to the pool. +func (p *BufferPool) Put(b []byte) { + if p == nil { + return + } + + p.mu.RLock() + defer p.mu.RUnlock() + + if p.closed { + return + } + + atomic.AddUint32(&p.put, 1) + + pool := p.pool[p.poolNum(cap(b))] + select { + case pool <- b: + default: + } + +} + +func (p *BufferPool) Close() { + if p == nil { + return + } + + p.mu.Lock() + if !p.closed { + p.closed = true + p.closeC <- struct{}{} + } + p.mu.Unlock() +} + +func (p *BufferPool) String() string { + if p == nil { + return "" + } + + return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v Zh·%v G·%d P·%d H·%d <·%d =·%d >·%d M·%d}", + p.baseline0, p.size, p.sizeMiss, p.sizeHalf, p.get, p.put, p.half, p.less, p.equal, p.greater, p.miss) +} + +func (p *BufferPool) drain() { + ticker := time.NewTicker(2 * time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + for _, ch := range p.pool { + select { + case <-ch: + default: + } + } + case <-p.closeC: + close(p.closeC) + for _, ch := range p.pool { + close(ch) + } + return + } + } +} + +// NewBufferPool creates a new initialized 'buffer pool'. +func NewBufferPool(baseline int) *BufferPool { + if baseline <= 0 { + panic("baseline can't be <= 0") + } + p := &BufferPool{ + baseline0: baseline, + baseline: [...]int{baseline / 4, baseline / 2, baseline * 2, baseline * 4}, + closeC: make(chan struct{}, 1), + } + for i, cap := range []int{2, 2, 4, 4, 2, 1} { + p.pool[i] = make(chan []byte, cap) + } + go p.drain() + return p +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go new file mode 100644 index 0000000..631c9d6 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go @@ -0,0 +1,30 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
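The BufferPool above is driven entirely through four exported calls. A short, hedged sketch of the round trip (the baseline and sizes are illustrative):

```Go
pool := util.NewBufferPool(32 * 1024) // baseline capacity in bytes
b := pool.Get(4096)                   // len(b) == 4096, possibly recycled
// ... fill and use b ...
pool.Put(b)  // hand it back; silently dropped if its size class is full
pool.Close() // stops the background drain goroutine
```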
+ +package util + +import ( + "hash/crc32" +) + +var table = crc32.MakeTable(crc32.Castagnoli) + +// CRC is a CRC-32 checksum computed using Castagnoli's polynomial. +type CRC uint32 + +// NewCRC creates a new crc based on the given bytes. +func NewCRC(b []byte) CRC { + return CRC(0).Update(b) +} + +// Update updates the crc with the given bytes. +func (c CRC) Update(b []byte) CRC { + return CRC(crc32.Update(uint32(c), table, b)) +} + +// Value returns a masked crc. +func (c CRC) Value() uint32 { + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go new file mode 100644 index 0000000..7f3fa4e --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go @@ -0,0 +1,48 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package util + +import ( + "encoding/binary" +) + +// Hash return hash of the given data. +func Hash(data []byte, seed uint32) uint32 { + // Similar to murmur hash + const ( + m = uint32(0xc6a4a793) + r = uint32(24) + ) + var ( + h = seed ^ (uint32(len(data)) * m) + i int + ) + + for n := len(data) - len(data)%4; i < n; i += 4 { + h += binary.LittleEndian.Uint32(data[i:]) + h *= m + h ^= (h >> 16) + } + + switch len(data) - i { + default: + panic("not reached") + case 3: + h += uint32(data[i+2]) << 16 + fallthrough + case 2: + h += uint32(data[i+1]) << 8 + fallthrough + case 1: + h += uint32(data[i]) + h *= m + h ^= (h >> r) + case 0: + } + + return h +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go new file mode 100644 index 0000000..8515958 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go @@ -0,0 +1,32 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package util + +// Range is a key range. +type Range struct { + // Start of the key range, include in the range. + Start []byte + + // Limit of the key range, not include in the range. + Limit []byte +} + +// BytesPrefix returns key range that satisfy the given prefix. +// This only applicable for the standard 'bytes comparer'. +func BytesPrefix(prefix []byte) *Range { + var limit []byte + for i := len(prefix) - 1; i >= 0; i-- { + c := prefix[i] + if c < 0xff { + limit = make([]byte, i+1) + copy(limit, prefix) + limit[i] = c + 1 + break + } + } + return &Range{prefix, limit} +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go new file mode 100644 index 0000000..80614af --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go @@ -0,0 +1,73 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package util provides utilities used throughout leveldb. +package util + +import ( + "errors" +) + +var ( + ErrReleased = errors.New("leveldb: resource already relesed") + ErrHasReleaser = errors.New("leveldb: releaser already defined") +) + +// Releaser is the interface that wraps the basic Release method. +type Releaser interface { + // Release releases associated resources. 
Release should always succeed
+	// and can be called multiple times without causing an error.
+	Release()
+}
+
+// ReleaseSetter is the interface that wraps the basic SetReleaser method.
+type ReleaseSetter interface {
+	// SetReleaser associates the given releaser with the resources. The
+	// releaser will be called once the corresponding resources are
+	// released. Calling SetReleaser with nil will clear the releaser.
+	//
+	// This will panic if a releaser is already present or the
+	// corresponding resource is already released. The releaser should be
+	// cleared first before a new one is assigned.
+	SetReleaser(releaser Releaser)
+}
+
+// BasicReleaser provides a basic implementation of Releaser and ReleaseSetter.
+type BasicReleaser struct {
+	releaser Releaser
+	released bool
+}
+
+// Released reports whether the Release method has already been called.
+func (r *BasicReleaser) Released() bool {
+	return r.released
+}
+
+// Release implements Releaser.Release.
+func (r *BasicReleaser) Release() {
+	if !r.released {
+		if r.releaser != nil {
+			r.releaser.Release()
+			r.releaser = nil
+		}
+		r.released = true
+	}
+}
+
+// SetReleaser implements ReleaseSetter.SetReleaser.
+func (r *BasicReleaser) SetReleaser(releaser Releaser) {
+	if r.released {
+		panic(ErrReleased)
+	}
+	if r.releaser != nil && releaser != nil {
+		panic(ErrHasReleaser)
+	}
+	r.releaser = releaser
+}
+
+type NoopReleaser struct{}
+
+func (NoopReleaser) Release() {}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/version.go b/vendor/github.com/syndtr/goleveldb/leveldb/version.go
new file mode 100644
index 0000000..73f272a
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/version.go
@@ -0,0 +1,528 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"fmt"
+	"sync/atomic"
+	"unsafe"
+
+	"github.com/syndtr/goleveldb/leveldb/iterator"
+	"github.com/syndtr/goleveldb/leveldb/opt"
+	"github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type tSet struct {
+	level int
+	table *tFile
+}
+
+type version struct {
+	s *session
+
+	levels []tFiles
+
+	// Level that should be compacted next and its compaction score.
+	// Score < 1 means compaction is not strictly needed. These fields
+	// are initialized by computeCompaction()
+	cLevel int
+	cScore float64
+
+	cSeek unsafe.Pointer
+
+	closing  bool
+	ref      int
+	released bool
+}
+
+func newVersion(s *session) *version {
+	return &version{s: s}
+}
+
+func (v *version) incref() {
+	if v.released {
+		panic("already released")
+	}
+
+	v.ref++
+	if v.ref == 1 {
+		// Incr file ref.
+		for _, tt := range v.levels {
+			for _, t := range tt {
+				v.s.addFileRef(t.fd, 1)
+			}
+		}
+	}
+}
+
+func (v *version) releaseNB() {
+	v.ref--
+	if v.ref > 0 {
+		return
+	} else if v.ref < 0 {
+		panic("negative version ref")
+	}
+
+	for _, tt := range v.levels {
+		for _, t := range tt {
+			if v.s.addFileRef(t.fd, -1) == 0 {
+				v.s.tops.remove(t)
+			}
+		}
+	}
+
+	v.released = true
+}
+
+func (v *version) release() {
+	v.s.vmu.Lock()
+	v.releaseNB()
+	v.s.vmu.Unlock()
+}
+
+func (v *version) walkOverlapping(aux tFiles, ikey internalKey, f func(level int, t *tFile) bool, lf func(level int) bool) {
+	ukey := ikey.ukey()
+
+	// Aux level.
+	if aux != nil {
+		for _, t := range aux {
+			if t.overlaps(v.s.icmp, ukey, ukey) {
+				if !f(-1, t) {
+					return
+				}
+			}
+		}
+
+		if lf != nil && !lf(-1) {
+			return
+		}
+	}
+
+	// Walk tables level-by-level.
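+	// Level 0 is scanned exhaustively below because its tables may overlap
+	// one another; higher levels are sorted and disjoint, so a binary
+	// search (searchMax) finds the single table that can contain ukey.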
+ for level, tables := range v.levels { + if len(tables) == 0 { + continue + } + + if level == 0 { + // Level-0 files may overlap each other. Find all files that + // overlap ukey. + for _, t := range tables { + if t.overlaps(v.s.icmp, ukey, ukey) { + if !f(level, t) { + return + } + } + } + } else { + if i := tables.searchMax(v.s.icmp, ikey); i < len(tables) { + t := tables[i] + if v.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { + if !f(level, t) { + return + } + } + } + } + + if lf != nil && !lf(level) { + return + } + } +} + +func (v *version) get(aux tFiles, ikey internalKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) { + if v.closing { + return nil, false, ErrClosed + } + + ukey := ikey.ukey() + + var ( + tset *tSet + tseek bool + + // Level-0. + zfound bool + zseq uint64 + zkt keyType + zval []byte + ) + + err = ErrNotFound + + // Since entries never hop across level, finding key/value + // in smaller level make later levels irrelevant. + v.walkOverlapping(aux, ikey, func(level int, t *tFile) bool { + if level >= 0 && !tseek { + if tset == nil { + tset = &tSet{level, t} + } else { + tseek = true + } + } + + var ( + fikey, fval []byte + ferr error + ) + if noValue { + fikey, ferr = v.s.tops.findKey(t, ikey, ro) + } else { + fikey, fval, ferr = v.s.tops.find(t, ikey, ro) + } + + switch ferr { + case nil: + case ErrNotFound: + return true + default: + err = ferr + return false + } + + if fukey, fseq, fkt, fkerr := parseInternalKey(fikey); fkerr == nil { + if v.s.icmp.uCompare(ukey, fukey) == 0 { + // Level <= 0 may overlaps each-other. + if level <= 0 { + if fseq >= zseq { + zfound = true + zseq = fseq + zkt = fkt + zval = fval + } + } else { + switch fkt { + case keyTypeVal: + value = fval + err = nil + case keyTypeDel: + default: + panic("leveldb: invalid internalKey type") + } + return false + } + } + } else { + err = fkerr + return false + } + + return true + }, func(level int) bool { + if zfound { + switch zkt { + case keyTypeVal: + value = zval + err = nil + case keyTypeDel: + default: + panic("leveldb: invalid internalKey type") + } + return false + } + + return true + }) + + if tseek && tset.table.consumeSeek() <= 0 { + tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) + } + + return +} + +func (v *version) sampleSeek(ikey internalKey) (tcomp bool) { + var tset *tSet + + v.walkOverlapping(nil, ikey, func(level int, t *tFile) bool { + if tset == nil { + tset = &tSet{level, t} + return true + } + if tset.table.consumeSeek() <= 0 { + tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) + } + return false + }, nil) + + return +} + +func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) { + strict := opt.GetStrict(v.s.o.Options, ro, opt.StrictReader) + for level, tables := range v.levels { + if level == 0 { + // Merge all level zero files together since they may overlap. + for _, t := range tables { + its = append(its, v.s.tops.newIterator(t, slice, ro)) + } + } else if len(tables) != 0 { + its = append(its, iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict)) + } + } + return +} + +func (v *version) newStaging() *versionStaging { + return &versionStaging{base: v} +} + +// Spawn a new version based on this version. 
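+// A spawn runs the record through a versionStaging: deleted tables are
+// dropped, added tables are merged in, and each level is re-sorted.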
+func (v *version) spawn(r *sessionRecord) *version { + staging := v.newStaging() + staging.commit(r) + return staging.finish() +} + +func (v *version) fillRecord(r *sessionRecord) { + for level, tables := range v.levels { + for _, t := range tables { + r.addTableFile(level, t) + } + } +} + +func (v *version) tLen(level int) int { + if level < len(v.levels) { + return len(v.levels[level]) + } + return 0 +} + +func (v *version) offsetOf(ikey internalKey) (n int64, err error) { + for level, tables := range v.levels { + for _, t := range tables { + if v.s.icmp.Compare(t.imax, ikey) <= 0 { + // Entire file is before "ikey", so just add the file size + n += t.size + } else if v.s.icmp.Compare(t.imin, ikey) > 0 { + // Entire file is after "ikey", so ignore + if level > 0 { + // Files other than level 0 are sorted by meta->min, so + // no further files in this level will contain data for + // "ikey". + break + } + } else { + // "ikey" falls in the range for this table. Add the + // approximate offset of "ikey" within the table. + if m, err := v.s.tops.offsetOf(t, ikey); err == nil { + n += m + } else { + return 0, err + } + } + } + } + + return +} + +func (v *version) pickMemdbLevel(umin, umax []byte, maxLevel int) (level int) { + if maxLevel > 0 { + if len(v.levels) == 0 { + return maxLevel + } + if !v.levels[0].overlaps(v.s.icmp, umin, umax, true) { + var overlaps tFiles + for ; level < maxLevel; level++ { + if pLevel := level + 1; pLevel >= len(v.levels) { + return maxLevel + } else if v.levels[pLevel].overlaps(v.s.icmp, umin, umax, false) { + break + } + if gpLevel := level + 2; gpLevel < len(v.levels) { + overlaps = v.levels[gpLevel].getOverlaps(overlaps, v.s.icmp, umin, umax, false) + if overlaps.size() > int64(v.s.o.GetCompactionGPOverlaps(level)) { + break + } + } + } + } + } + return +} + +func (v *version) computeCompaction() { + // Precomputed best level for next compaction + bestLevel := int(-1) + bestScore := float64(-1) + + statFiles := make([]int, len(v.levels)) + statSizes := make([]string, len(v.levels)) + statScore := make([]string, len(v.levels)) + statTotSize := int64(0) + + for level, tables := range v.levels { + var score float64 + size := tables.size() + if level == 0 { + // We treat level-0 specially by bounding the number of files + // instead of number of bytes for two reasons: + // + // (1) With larger write-buffer sizes, it is nice not to do too + // many level-0 compaction. + // + // (2) The files in level-0 are merged on every read and + // therefore we wish to avoid too many files when the individual + // file size is small (perhaps because of a small write-buffer + // setting, or very high compression ratios, or lots of + // overwrites/deletions). 
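+			// For example, with a level-0 trigger of 4 tables, five
+			// level-0 tables yield a score of 1.25 and win over any
+			// size-based level whose score is below that.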
+ score = float64(len(tables)) / float64(v.s.o.GetCompactionL0Trigger()) + } else { + score = float64(size) / float64(v.s.o.GetCompactionTotalSize(level)) + } + + if score > bestScore { + bestLevel = level + bestScore = score + } + + statFiles[level] = len(tables) + statSizes[level] = shortenb(int(size)) + statScore[level] = fmt.Sprintf("%.2f", score) + statTotSize += size + } + + v.cLevel = bestLevel + v.cScore = bestScore + + v.s.logf("version@stat F·%v S·%s%v Sc·%v", statFiles, shortenb(int(statTotSize)), statSizes, statScore) +} + +func (v *version) needCompaction() bool { + return v.cScore >= 1 || atomic.LoadPointer(&v.cSeek) != nil +} + +type tablesScratch struct { + added map[int64]atRecord + deleted map[int64]struct{} +} + +type versionStaging struct { + base *version + levels []tablesScratch +} + +func (p *versionStaging) getScratch(level int) *tablesScratch { + if level >= len(p.levels) { + newLevels := make([]tablesScratch, level+1) + copy(newLevels, p.levels) + p.levels = newLevels + } + return &(p.levels[level]) +} + +func (p *versionStaging) commit(r *sessionRecord) { + // Deleted tables. + for _, r := range r.deletedTables { + scratch := p.getScratch(r.level) + if r.level < len(p.base.levels) && len(p.base.levels[r.level]) > 0 { + if scratch.deleted == nil { + scratch.deleted = make(map[int64]struct{}) + } + scratch.deleted[r.num] = struct{}{} + } + if scratch.added != nil { + delete(scratch.added, r.num) + } + } + + // New tables. + for _, r := range r.addedTables { + scratch := p.getScratch(r.level) + if scratch.added == nil { + scratch.added = make(map[int64]atRecord) + } + scratch.added[r.num] = r + if scratch.deleted != nil { + delete(scratch.deleted, r.num) + } + } +} + +func (p *versionStaging) finish() *version { + // Build new version. + nv := newVersion(p.base.s) + numLevel := len(p.levels) + if len(p.base.levels) > numLevel { + numLevel = len(p.base.levels) + } + nv.levels = make([]tFiles, numLevel) + for level := 0; level < numLevel; level++ { + var baseTabels tFiles + if level < len(p.base.levels) { + baseTabels = p.base.levels[level] + } + + if level < len(p.levels) { + scratch := p.levels[level] + + var nt tFiles + // Prealloc list if possible. + if n := len(baseTabels) + len(scratch.added) - len(scratch.deleted); n > 0 { + nt = make(tFiles, 0, n) + } + + // Base tables. + for _, t := range baseTabels { + if _, ok := scratch.deleted[t.fd.Num]; ok { + continue + } + if _, ok := scratch.added[t.fd.Num]; ok { + continue + } + nt = append(nt, t) + } + + // New tables. + for _, r := range scratch.added { + nt = append(nt, tableFileFromRecord(r)) + } + + if len(nt) != 0 { + // Sort tables. + if level == 0 { + nt.sortByNum() + } else { + nt.sortByKey(p.base.s.icmp) + } + + nv.levels[level] = nt + } + } else { + nv.levels[level] = baseTabels + } + } + + // Trim levels. + n := len(nv.levels) + for ; n > 0 && nv.levels[n-1] == nil; n-- { + } + nv.levels = nv.levels[:n] + + // Compute compaction score for new version. 
+ nv.computeCompaction() + + return nv +} + +type versionReleaser struct { + v *version + once bool +} + +func (vr *versionReleaser) Release() { + v := vr.v + v.s.vmu.Lock() + if !vr.once { + v.releaseNB() + vr.once = true + } + v.s.vmu.Unlock() +} diff --git a/vendor/modules.txt b/vendor/modules.txt index e83bba8..24f7a5a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,17 +1,44 @@ # github.com/go-sql-driver/mysql v1.5.0 ## explicit github.com/go-sql-driver/mysql +# github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db +github.com/golang/snappy # github.com/jinzhu/gorm v1.9.12 ## explicit -github.com/jinzhu/gorm -# github.com/jinzhu/inflection v1.0.0 -github.com/jinzhu/inflection # github.com/jmoiron/sqlx v1.2.0 ## explicit -github.com/jmoiron/sqlx -github.com/jmoiron/sqlx/reflectx # github.com/smartystreets/goconvey v1.6.4 ## explicit +# github.com/syndtr/goleveldb v1.0.0 +github.com/syndtr/goleveldb/leveldb +github.com/syndtr/goleveldb/leveldb/cache +github.com/syndtr/goleveldb/leveldb/comparer +github.com/syndtr/goleveldb/leveldb/errors +github.com/syndtr/goleveldb/leveldb/filter +github.com/syndtr/goleveldb/leveldb/iterator +github.com/syndtr/goleveldb/leveldb/journal +github.com/syndtr/goleveldb/leveldb/memdb +github.com/syndtr/goleveldb/leveldb/opt +github.com/syndtr/goleveldb/leveldb/storage +github.com/syndtr/goleveldb/leveldb/table +github.com/syndtr/goleveldb/leveldb/util # gopkg.in/ini.v1 v1.54.0 ## explicit gopkg.in/ini.v1 +# xorm.io/builder v0.3.7 +xorm.io/builder +# xorm.io/xorm v1.0.4 +## explicit +xorm.io/xorm +xorm.io/xorm/caches +xorm.io/xorm/contexts +xorm.io/xorm/convert +xorm.io/xorm/core +xorm.io/xorm/dialects +xorm.io/xorm/internal/json +xorm.io/xorm/internal/statements +xorm.io/xorm/internal/utils +xorm.io/xorm/log +xorm.io/xorm/names +xorm.io/xorm/schemas +xorm.io/xorm/tags diff --git a/vendor/xorm.io/builder/.drone.yml b/vendor/xorm.io/builder/.drone.yml new file mode 100644 index 0000000..61d323d --- /dev/null +++ b/vendor/xorm.io/builder/.drone.yml @@ -0,0 +1,21 @@ +--- +kind: pipeline +name: testing + +steps: +- name: test + pull: default + image: golang:1.11 + commands: + - go get -u golang.org/x/lint/golint + - golint ./... + - go vet + - go test -v -race -coverprofile=coverage.txt -covermode=atomic + environment: + GOPROXY: https://goproxy.cn + GO111MODULE: "on" + when: + event: + - push + - tag + - pull_request \ No newline at end of file diff --git a/vendor/xorm.io/builder/.gitignore b/vendor/xorm.io/builder/.gitignore new file mode 100644 index 0000000..723ef36 --- /dev/null +++ b/vendor/xorm.io/builder/.gitignore @@ -0,0 +1 @@ +.idea \ No newline at end of file diff --git a/vendor/xorm.io/builder/LICENSE b/vendor/xorm.io/builder/LICENSE new file mode 100644 index 0000000..614d5e2 --- /dev/null +++ b/vendor/xorm.io/builder/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2016 The Xorm Authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +* Neither the name of the {organization} nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/xorm.io/builder/README.md b/vendor/xorm.io/builder/README.md new file mode 100644 index 0000000..53e5403 --- /dev/null +++ b/vendor/xorm.io/builder/README.md @@ -0,0 +1,206 @@ +# SQL builder + +[![Build Status](https://drone.gitea.com/api/badges/xorm/builder/status.svg)](https://drone.gitea.com/xorm/builder) [![](http://gocover.io/_badge/xorm.io/builder)](http://gocover.io/xorm.io/builder) +[![](https://goreportcard.com/badge/xorm.io/builder)](https://goreportcard.com/report/xorm.io/builder) + +Package builder is a lightweight and fast SQL builder for Go and XORM. + +Make sure you have installed Go 1.8+ and then: + + go get xorm.io/builder + +# Insert + +```Go +sql, args, err := builder.Insert(Eq{"c": 1, "d": 2}).Into("table1").ToSQL() + +// INSERT INTO table1 SELECT * FROM table2 +sql, err := builder.Insert().Into("table1").Select().From("table2").ToBoundSQL() + +// INSERT INTO table1 (a, b) SELECT b, c FROM table2 +sql, err = builder.Insert("a, b").Into("table1").Select("b, c").From("table2").ToBoundSQL() +``` + +# Select + +```Go +// Simple Query +sql, args, err := Select("c, d").From("table1").Where(Eq{"a": 1}).ToSQL() +// With join +sql, args, err = Select("c, d").From("table1").LeftJoin("table2", Eq{"table1.id": 1}.And(Lt{"table2.id": 3})). + RightJoin("table3", "table2.id = table3.tid").Where(Eq{"a": 1}).ToSQL() +// From sub query +sql, args, err := Select("sub.id").From(Select("c").From("table1").Where(Eq{"a": 1}), "sub").Where(Eq{"b": 1}).ToSQL() +// From union query +sql, args, err = Select("sub.id").From( + Select("id").From("table1").Where(Eq{"a": 1}).Union("all", Select("id").From("table1").Where(Eq{"a": 2})),"sub"). + Where(Eq{"b": 1}).ToSQL() +// With order by +sql, args, err = Select("a", "b", "c").From("table1").Where(Eq{"f1": "v1", "f2": "v2"}). + OrderBy("a ASC").ToSQL() +// With limit. +// Be careful! You should set up specific dialect for builder before performing a query with LIMIT +sql, args, err = Dialect(MYSQL).Select("a", "b", "c").From("table1").OrderBy("a ASC"). + Limit(5, 10).ToSQL() +``` + +# Update + +```Go +sql, args, err := Update(Eq{"a": 2}).From("table1").Where(Eq{"a": 1}).ToSQL() +``` + +# Delete + +```Go +sql, args, err := Delete(Eq{"a": 1}).From("table1").ToSQL() +``` + +# Union + +```Go +sql, args, err := Select("*").From("a").Where(Eq{"status": "1"}). + Union("all", Select("*").From("a").Where(Eq{"status": "2"})). + Union("distinct", Select("*").From("a").Where(Eq{"status": "3"})). + Union("", Select("*").From("a").Where(Eq{"status": "4"})). 
+
+	ToSQL()
+```
+
+# Conditions
+
+* `Eq` is a redefined map type; you can give one or more conditions to `Eq`
+
+```Go
+import . "xorm.io/builder"
+
+sql, args, _ := ToSQL(Eq{"a":1})
+// a=? [1]
+sql, args, _ := ToSQL(Eq{"b":"c"}.And(Eq{"c": 0}))
+// b=? AND c=? ["c", 0]
+sql, args, _ := ToSQL(Eq{"b":"c", "c":0})
+// b=? AND c=? ["c", 0]
+sql, args, _ := ToSQL(Eq{"b":"c"}.Or(Eq{"b":"d"}))
+// b=? OR b=? ["c", "d"]
+sql, args, _ := ToSQL(Eq{"b": []string{"c", "d"}})
+// b IN (?,?) ["c", "d"]
+sql, args, _ := ToSQL(Eq{"b": 1, "c":[]int{2, 3}})
+// b=? AND c IN (?,?) [1, 2, 3]
+```
+
+* `Neq` works the same as `Eq`
+
+```Go
+import . "xorm.io/builder"
+
+sql, args, _ := ToSQL(Neq{"a":1})
+// a<>? [1]
+sql, args, _ := ToSQL(Neq{"b":"c"}.And(Neq{"c": 0}))
+// b<>? AND c<>? ["c", 0]
+sql, args, _ := ToSQL(Neq{"b":"c", "c":0})
+// b<>? AND c<>? ["c", 0]
+sql, args, _ := ToSQL(Neq{"b":"c"}.Or(Neq{"b":"d"}))
+// b<>? OR b<>? ["c", "d"]
+sql, args, _ := ToSQL(Neq{"b": []string{"c", "d"}})
+// b NOT IN (?,?) ["c", "d"]
+sql, args, _ := ToSQL(Neq{"b": 1, "c":[]int{2, 3}})
+// b<>? AND c NOT IN (?,?) [1, 2, 3]
+```
+
+* `Gt`, `Gte`, `Lt`, `Lte`
+
+```Go
+import . "xorm.io/builder"
+
+sql, args, _ := ToSQL(Gt{"a", 1}.And(Gte{"b", 2}))
+// a>? AND b>=? [1, 2]
+sql, args, _ := ToSQL(Lt{"a", 1}.Or(Lte{"b", 2}))
+// a<? OR b<=? [1, 2]
+```
+
+* `Like`
+
+```Go
+import . "xorm.io/builder"
+
+sql, args, _ := ToSQL(Like{"a", "c"})
+// a LIKE ? [%c%]
+```
+
+* `Expr` you can customize your SQL with `Expr`
+
+```Go
+import . "xorm.io/builder"
+
+sql, args, _ := ToSQL(Expr("a = ? ", 1))
+// a = ? [1]
+sql, args, _ := ToSQL(Eq{"a": Expr("select id from table where c = ?", 1)})
+// a=(select id from table where c = ?) [1]
+```
+
+* `In` and `NotIn`
+
+```Go
+import . "xorm.io/builder"
+
+sql, args, _ := ToSQL(In("a", 1, 2, 3))
+// a IN (?,?,?) [1,2,3]
+sql, args, _ := ToSQL(In("a", []int{1, 2, 3}))
+// a IN (?,?,?) [1,2,3]
+sql, args, _ := ToSQL(In("a", Expr("select id from b where c = ?", 1)))
+// a IN (select id from b where c = ?) [1]
+```
+
+* `IsNull` and `NotNull`
+
+```Go
+import . "xorm.io/builder"
+
+sql, args, _ := ToSQL(IsNull{"a"})
+// a IS NULL []
+sql, args, _ := ToSQL(NotNull{"b"})
+// b IS NOT NULL []
+```
+
+* `And(conds ...Cond)`, And can connect one or more conditions via And
+
+```Go
+import . "xorm.io/builder"
+
+sql, args, _ := ToSQL(And(Eq{"a":1}, Like{"b", "c"}, Neq{"d", 2}))
+// a=? AND b LIKE ? AND d<>? [1, %c%, 2]
+```
+
+* `Or(conds ...Cond)`, Or can connect one or more conditions via Or
+
+```Go
+import . "xorm.io/builder"
+
+sql, args, _ := ToSQL(Or(Eq{"a":1}, Like{"b", "c"}, Neq{"d", 2}))
+// a=? OR b LIKE ? OR d<>? [1, %c%, 2]
+sql, args, _ := ToSQL(Or(Eq{"a":1}, And(Like{"b", "c"}, Neq{"d", 2})))
+// a=? OR (b LIKE ? AND d<>?) [1, %c%, 2]
+```
+
+* `Between`
+
+```Go
+import . "xorm.io/builder"
+
+sql, args, _ := ToSQL(Between{"a", 1, 2})
+// a BETWEEN 1 AND 2
+```
+
+* Define your own conditions
+
+`Cond` is an interface:
+
+```Go
+type Cond interface {
+	WriteTo(Writer) error
+	And(...Cond) Cond
+	Or(...Cond) Cond
+	IsValid() bool
+}
+```
+
+You can define your own conditions and compose them with other `Cond`s.
\ No newline at end of file
diff --git a/vendor/xorm.io/builder/builder.go b/vendor/xorm.io/builder/builder.go
new file mode 100644
index 0000000..cccc8a7
--- /dev/null
+++ b/vendor/xorm.io/builder/builder.go
@@ -0,0 +1,321 @@
+// Copyright 2016 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package builder
+
+import (
+	sql2 "database/sql"
+	"fmt"
+)
+
+type optype byte
+
+const (
+	condType   optype = iota // only conditions
+	selectType               // select
+	insertType               // insert
+	updateType               // update
+	deleteType               // delete
+	setOpType                // set operation
+)
+
+// all databases
+const (
+	POSTGRES = "postgres"
+	SQLITE   = "sqlite3"
+	MYSQL    = "mysql"
+	MSSQL    = "mssql"
+	ORACLE   = "oracle"
+
+	UNION     = "union"
+	INTERSECT = "intersect"
+	EXCEPT    = "except"
+)
+
+type join struct {
+	joinType  string
+	joinTable interface{}
+	joinCond  Cond
+}
+
+type setOp struct {
+	opType       string
+	distinctType string
+	builder      *Builder
+}
+
+type limit struct {
+	limitN int
+	offset int
+}
+
+// Builder describes a SQL statement
+type Builder struct {
+	optype
+	dialect    string
+	isNested   bool
+	into       string
+	from       string
+	subQuery   *Builder
+	cond       Cond
+	selects    []string
+	joins      []join
+	setOps     []setOp
+	limitation *limit
+	insertCols []string
+	insertVals []interface{}
+	updates    []UpdateCond
+	orderBy    string
+	groupBy    string
+	having     string
+}
+
+// Dialect sets the db dialect of Builder.
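Dialect, defined next, is the entry point whenever a query needs LIMIT. A hedged sketch of a complete query built with the fluent API from this file (table and column names are invented):

```Go
sql, args, err := builder.Dialect(builder.MYSQL).
	Select("id", "name").
	From("account").
	Where(builder.Eq{"status": 1}).
	OrderBy("id ASC").
	Limit(10, 20).
	ToSQL()
if err != nil {
	// handle the error
}
// sql  == "SELECT id,name FROM account WHERE status=? ORDER BY id ASC LIMIT 10 OFFSET 20"
// args == []interface{}{1}
```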
+func Dialect(dialect string) *Builder { + builder := &Builder{cond: NewCond(), dialect: dialect} + return builder +} + +// MySQL is shortcut of Dialect(MySQL) +func MySQL() *Builder { + return Dialect(MYSQL) +} + +// MsSQL is shortcut of Dialect(MsSQL) +func MsSQL() *Builder { + return Dialect(MSSQL) +} + +// Oracle is shortcut of Dialect(Oracle) +func Oracle() *Builder { + return Dialect(ORACLE) +} + +// Postgres is shortcut of Dialect(Postgres) +func Postgres() *Builder { + return Dialect(POSTGRES) +} + +// SQLite is shortcut of Dialect(SQLITE) +func SQLite() *Builder { + return Dialect(SQLITE) +} + +// Where sets where SQL +func (b *Builder) Where(cond Cond) *Builder { + if b.cond.IsValid() { + b.cond = b.cond.And(cond) + } else { + b.cond = cond + } + return b +} + +// From sets from subject(can be a table name in string or a builder pointer) and its alias +func (b *Builder) From(subject interface{}, alias ...string) *Builder { + switch subject.(type) { + case *Builder: + b.subQuery = subject.(*Builder) + + if len(alias) > 0 { + b.from = alias[0] + } else { + b.isNested = true + } + case string: + b.from = subject.(string) + + if len(alias) > 0 { + b.from = b.from + " " + alias[0] + } + } + + return b +} + +// TableName returns the table name +func (b *Builder) TableName() string { + if b.optype == insertType { + return b.into + } + return b.from +} + +// Into sets insert table name +func (b *Builder) Into(tableName string) *Builder { + b.into = tableName + return b +} + +// Union sets union conditions +func (b *Builder) Union(distinctType string, cond *Builder) *Builder { + return b.setOperation(UNION, distinctType, cond) +} + +// Intersect sets intersect conditions +func (b *Builder) Intersect(distinctType string, cond *Builder) *Builder { + return b.setOperation(INTERSECT, distinctType, cond) +} + +// Except sets except conditions +func (b *Builder) Except(distinctType string, cond *Builder) *Builder { + return b.setOperation(EXCEPT, distinctType, cond) +} + +func (b *Builder) setOperation(opType, distinctType string, cond *Builder) *Builder { + + var builder *Builder + if b.optype != setOpType { + builder = &Builder{cond: NewCond()} + builder.optype = setOpType + builder.dialect = b.dialect + builder.selects = b.selects + + currentSetOps := b.setOps + // erase sub setOps (actually append to new Builder.unions) + b.setOps = nil + + for e := range currentSetOps { + currentSetOps[e].builder.dialect = b.dialect + } + + builder.setOps = append(append(builder.setOps, setOp{opType, "", b}), currentSetOps...) 
+ } else { + builder = b + } + + if cond != nil { + if cond.dialect == "" && builder.dialect != "" { + cond.dialect = builder.dialect + } + + builder.setOps = append(builder.setOps, setOp{opType, distinctType, cond}) + } + + return builder +} + +// Limit sets limitN condition +func (b *Builder) Limit(limitN int, offset ...int) *Builder { + b.limitation = &limit{limitN: limitN} + + if len(offset) > 0 { + b.limitation.offset = offset[0] + } + + return b +} + +// Select sets select SQL +func (b *Builder) Select(cols ...string) *Builder { + b.selects = cols + if b.optype == condType { + b.optype = selectType + } + return b +} + +// And sets AND condition +func (b *Builder) And(cond Cond) *Builder { + b.cond = And(b.cond, cond) + return b +} + +// Or sets OR condition +func (b *Builder) Or(cond Cond) *Builder { + b.cond = Or(b.cond, cond) + return b +} + +// Update sets update SQL +func (b *Builder) Update(updates ...Cond) *Builder { + b.updates = make([]UpdateCond, 0, len(updates)) + for _, update := range updates { + if u, ok := update.(UpdateCond); ok && u.IsValid() { + b.updates = append(b.updates, u) + } + } + b.optype = updateType + return b +} + +// Delete sets delete SQL +func (b *Builder) Delete(conds ...Cond) *Builder { + b.cond = b.cond.And(conds...) + b.optype = deleteType + return b +} + +// WriteTo implements Writer interface +func (b *Builder) WriteTo(w Writer) error { + switch b.optype { + /*case condType: + return b.cond.WriteTo(w)*/ + case selectType: + return b.selectWriteTo(w) + case insertType: + return b.insertWriteTo(w) + case updateType: + return b.updateWriteTo(w) + case deleteType: + return b.deleteWriteTo(w) + case setOpType: + return b.setOpWriteTo(w) + } + + return ErrNotSupportType +} + +// ToSQL convert a builder to SQL and args +func (b *Builder) ToSQL() (string, []interface{}, error) { + w := NewWriter() + if err := b.WriteTo(w); err != nil { + return "", nil, err + } + + // in case of sql.NamedArg in args + for e := range w.args { + if namedArg, ok := w.args[e].(sql2.NamedArg); ok { + w.args[e] = namedArg.Value + } + } + + var sql = w.String() + var err error + + switch b.dialect { + case ORACLE, MSSQL: + // This is for compatibility with different sql drivers + for e := range w.args { + w.args[e] = sql2.Named(fmt.Sprintf("p%d", e+1), w.args[e]) + } + + var prefix string + if b.dialect == ORACLE { + prefix = ":p" + } else { + prefix = "@p" + } + + if sql, err = ConvertPlaceholder(sql, prefix); err != nil { + return "", nil, err + } + case POSTGRES: + if sql, err = ConvertPlaceholder(sql, "$"); err != nil { + return "", nil, err + } + } + + return sql, w.args, nil +} + +// ToBoundSQL generated a bound SQL string +func (b *Builder) ToBoundSQL() (string, error) { + w := NewWriter() + if err := b.WriteTo(w); err != nil { + return "", err + } + + return ConvertToBoundSQL(w.String(), w.args) +} diff --git a/vendor/xorm.io/builder/builder_delete.go b/vendor/xorm.io/builder/builder_delete.go new file mode 100644 index 0000000..317cc3f --- /dev/null +++ b/vendor/xorm.io/builder/builder_delete.go @@ -0,0 +1,27 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package builder + +import ( + "fmt" +) + +// Delete creates a delete Builder +func Delete(conds ...Cond) *Builder { + builder := &Builder{cond: NewCond()} + return builder.Delete(conds...) 
+} + +func (b *Builder) deleteWriteTo(w Writer) error { + if len(b.from) <= 0 { + return ErrNoTableName + } + + if _, err := fmt.Fprintf(w, "DELETE FROM %s WHERE ", b.from); err != nil { + return err + } + + return b.cond.WriteTo(w) +} diff --git a/vendor/xorm.io/builder/builder_insert.go b/vendor/xorm.io/builder/builder_insert.go new file mode 100644 index 0000000..8cef5c5 --- /dev/null +++ b/vendor/xorm.io/builder/builder_insert.go @@ -0,0 +1,149 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package builder + +import ( + "bytes" + "fmt" + "sort" +) + +// Insert creates an insert Builder +func Insert(eq ...interface{}) *Builder { + builder := &Builder{cond: NewCond()} + return builder.Insert(eq...) +} + +func (b *Builder) insertSelectWriteTo(w Writer) error { + if _, err := fmt.Fprintf(w, "INSERT INTO %s ", b.into); err != nil { + return err + } + + if len(b.insertCols) > 0 { + fmt.Fprintf(w, "(") + for _, col := range b.insertCols { + fmt.Fprintf(w, col) + } + fmt.Fprintf(w, ") ") + } + + return b.selectWriteTo(w) +} + +func (b *Builder) insertWriteTo(w Writer) error { + if len(b.into) <= 0 { + return ErrNoTableName + } + if len(b.insertCols) <= 0 && b.from == "" { + return ErrNoColumnToInsert + } + + if b.into != "" && b.from != "" { + return b.insertSelectWriteTo(w) + } + + if _, err := fmt.Fprintf(w, "INSERT INTO %s (", b.into); err != nil { + return err + } + + var args = make([]interface{}, 0) + var bs []byte + var valBuffer = bytes.NewBuffer(bs) + + for i, col := range b.insertCols { + value := b.insertVals[i] + fmt.Fprint(w, col) + if e, ok := value.(expr); ok { + fmt.Fprintf(valBuffer, "(%s)", e.sql) + args = append(args, e.args...) + } else if value == nil { + fmt.Fprintf(valBuffer, `null`) + } else { + fmt.Fprint(valBuffer, "?") + args = append(args, value) + } + + if i != len(b.insertCols)-1 { + if _, err := fmt.Fprint(w, ","); err != nil { + return err + } + if _, err := fmt.Fprint(valBuffer, ","); err != nil { + return err + } + } + } + + if _, err := fmt.Fprint(w, ") Values ("); err != nil { + return err + } + + if _, err := w.Write(valBuffer.Bytes()); err != nil { + return err + } + if _, err := fmt.Fprint(w, ")"); err != nil { + return err + } + + w.Append(args...) 
+ + return nil +} + +type insertColsSorter struct { + cols []string + vals []interface{} +} + +func (s insertColsSorter) Len() int { + return len(s.cols) +} + +func (s insertColsSorter) Swap(i, j int) { + s.cols[i], s.cols[j] = s.cols[j], s.cols[i] + s.vals[i], s.vals[j] = s.vals[j], s.vals[i] +} + +func (s insertColsSorter) Less(i, j int) bool { + return s.cols[i] < s.cols[j] +} + +// Insert sets insert SQL +func (b *Builder) Insert(eq ...interface{}) *Builder { + if len(eq) > 0 { + var paramType = -1 + for _, e := range eq { + switch t := e.(type) { + case Eq: + if paramType == -1 { + paramType = 0 + } + if paramType != 0 { + break + } + for k, v := range t { + b.insertCols = append(b.insertCols, k) + b.insertVals = append(b.insertVals, v) + } + case string: + if paramType == -1 { + paramType = 1 + } + if paramType != 1 { + break + } + b.insertCols = append(b.insertCols, t) + } + } + } + + if len(b.insertCols) == len(b.insertVals) { + sort.Sort(insertColsSorter{ + cols: b.insertCols, + vals: b.insertVals, + }) + } + b.optype = insertType + return b +} diff --git a/vendor/xorm.io/builder/builder_join.go b/vendor/xorm.io/builder/builder_join.go new file mode 100644 index 0000000..a6604c5 --- /dev/null +++ b/vendor/xorm.io/builder/builder_join.go @@ -0,0 +1,42 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package builder + +// InnerJoin sets inner join +func (b *Builder) InnerJoin(joinTable, joinCond interface{}) *Builder { + return b.Join("INNER", joinTable, joinCond) +} + +// LeftJoin sets left join SQL +func (b *Builder) LeftJoin(joinTable, joinCond interface{}) *Builder { + return b.Join("LEFT", joinTable, joinCond) +} + +// RightJoin sets right join SQL +func (b *Builder) RightJoin(joinTable, joinCond interface{}) *Builder { + return b.Join("RIGHT", joinTable, joinCond) +} + +// CrossJoin sets cross join SQL +func (b *Builder) CrossJoin(joinTable, joinCond interface{}) *Builder { + return b.Join("CROSS", joinTable, joinCond) +} + +// FullJoin sets full join SQL +func (b *Builder) FullJoin(joinTable, joinCond interface{}) *Builder { + return b.Join("FULL", joinTable, joinCond) +} + +// Join sets join table and conditions +func (b *Builder) Join(joinType string, joinTable, joinCond interface{}) *Builder { + switch joinCond.(type) { + case Cond: + b.joins = append(b.joins, join{joinType, joinTable, joinCond.(Cond)}) + case string: + b.joins = append(b.joins, join{joinType, joinTable, Expr(joinCond.(string))}) + } + + return b +} diff --git a/vendor/xorm.io/builder/builder_limit.go b/vendor/xorm.io/builder/builder_limit.go new file mode 100644 index 0000000..82e1179 --- /dev/null +++ b/vendor/xorm.io/builder/builder_limit.go @@ -0,0 +1,103 @@ +// Copyright 2018 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
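One detail of the insert path above that is easy to miss: columns and values are sorted together by insertColsSorter, so the output is deterministic regardless of Go's random map iteration order. A hedged sketch (the account table is invented):

```Go
sql, args, err := builder.Insert(builder.Eq{"name": "lea", "age": 30}).
	Into("account").
	ToSQL()
if err != nil {
	// handle the error
}
// sql  == "INSERT INTO account (age,name) Values (?,?)"
// args == []interface{}{30, "lea"}
```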
+ +package builder + +import ( + "fmt" + "strings" +) + +func (b *Builder) limitWriteTo(w Writer) error { + if strings.TrimSpace(b.dialect) == "" { + return ErrDialectNotSetUp + } + + if b.limitation != nil { + limit := b.limitation + if limit.offset < 0 || limit.limitN <= 0 { + return ErrInvalidLimitation + } + // erase limit condition + b.limitation = nil + defer func() { + b.limitation = limit + }() + ow := w.(*BytesWriter) + + switch strings.ToLower(strings.TrimSpace(b.dialect)) { + case ORACLE: + if len(b.selects) == 0 { + b.selects = append(b.selects, "*") + } + + var final *Builder + selects := b.selects + b.selects = append(selects, "ROWNUM RN") + + var wb *Builder + if b.optype == setOpType { + wb = Dialect(b.dialect).Select("at.*", "ROWNUM RN"). + From(b, "at") + } else { + wb = b + } + + if limit.offset == 0 { + final = Dialect(b.dialect).Select(selects...).From(wb, "at"). + Where(Lte{"at.RN": limit.limitN}) + } else { + sub := Dialect(b.dialect).Select("*"). + From(b, "at").Where(Lte{"at.RN": limit.offset + limit.limitN}) + + final = Dialect(b.dialect).Select(selects...).From(sub, "att"). + Where(Gt{"att.RN": limit.offset}) + } + + return final.WriteTo(ow) + case SQLITE, MYSQL, POSTGRES: + // if type UNION, we need to write previous content back to current writer + if b.optype == setOpType { + if err := b.WriteTo(ow); err != nil { + return err + } + } + + if limit.offset == 0 { + fmt.Fprint(ow, " LIMIT ", limit.limitN) + } else { + fmt.Fprintf(ow, " LIMIT %v OFFSET %v", limit.limitN, limit.offset) + } + case MSSQL: + if len(b.selects) == 0 { + b.selects = append(b.selects, "*") + } + + var final *Builder + selects := b.selects + b.selects = append(append([]string{fmt.Sprintf("TOP %d %v", limit.limitN+limit.offset, b.selects[0])}, + b.selects[1:]...), "ROW_NUMBER() OVER (ORDER BY (SELECT 1)) AS RN") + + var wb *Builder + if b.optype == setOpType { + wb = Dialect(b.dialect).Select("*", "ROW_NUMBER() OVER (ORDER BY (SELECT 1)) AS RN"). + From(b, "at") + } else { + wb = b + } + + if limit.offset == 0 { + final = Dialect(b.dialect).Select(selects...).From(wb, "at") + } else { + final = Dialect(b.dialect).Select(selects...).From(wb, "at").Where(Gt{"at.RN": limit.offset}) + } + + return final.WriteTo(ow) + default: + return ErrNotSupportType + } + } + + return nil +} diff --git a/vendor/xorm.io/builder/builder_select.go b/vendor/xorm.io/builder/builder_select.go new file mode 100644 index 0000000..087a71d --- /dev/null +++ b/vendor/xorm.io/builder/builder_select.go @@ -0,0 +1,158 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package builder + +import ( + "fmt" +) + +// Select creates a select Builder +func Select(cols ...string) *Builder { + builder := &Builder{cond: NewCond()} + return builder.Select(cols...) 
+} + +func (b *Builder) selectWriteTo(w Writer) error { + if len(b.from) <= 0 && !b.isNested { + return ErrNoTableName + } + + // perform limit before writing to writer when b.dialect between ORACLE and MSSQL + // this avoid a duplicate writing problem in simple limit query + if b.limitation != nil && (b.dialect == ORACLE || b.dialect == MSSQL) { + return b.limitWriteTo(w) + } + + if _, err := fmt.Fprint(w, "SELECT "); err != nil { + return err + } + if len(b.selects) > 0 { + for i, s := range b.selects { + if _, err := fmt.Fprint(w, s); err != nil { + return err + } + if i != len(b.selects)-1 { + if _, err := fmt.Fprint(w, ","); err != nil { + return err + } + } + } + } else { + if _, err := fmt.Fprint(w, "*"); err != nil { + return err + } + } + + if b.subQuery == nil { + if _, err := fmt.Fprint(w, " FROM ", b.from); err != nil { + return err + } + } else { + if b.cond.IsValid() && len(b.from) <= 0 { + return ErrUnnamedDerivedTable + } + if b.subQuery.dialect != "" && b.dialect != b.subQuery.dialect { + return ErrInconsistentDialect + } + + // dialect of sub-query will inherit from the main one (if not set up) + if b.dialect != "" && b.subQuery.dialect == "" { + b.subQuery.dialect = b.dialect + } + + switch b.subQuery.optype { + case selectType, setOpType: + fmt.Fprint(w, " FROM (") + if err := b.subQuery.WriteTo(w); err != nil { + return err + } + + if len(b.from) == 0 { + fmt.Fprintf(w, ")") + } else { + fmt.Fprintf(w, ") %v", b.from) + } + default: + return ErrUnexpectedSubQuery + } + } + + for _, v := range b.joins { + b, ok := v.joinTable.(*Builder) + if ok { + if _, err := fmt.Fprintf(w, " %s JOIN (", v.joinType); err != nil { + return err + } + if err := b.WriteTo(w); err != nil { + return err + } + if _, err := fmt.Fprintf(w, ") ON "); err != nil { + return err + } + } else { + if _, err := fmt.Fprintf(w, " %s JOIN %s ON ", v.joinType, v.joinTable); err != nil { + return err + } + } + + if err := v.joinCond.WriteTo(w); err != nil { + return err + } + } + + if b.cond.IsValid() { + if _, err := fmt.Fprint(w, " WHERE "); err != nil { + return err + } + + if err := b.cond.WriteTo(w); err != nil { + return err + } + } + + if len(b.groupBy) > 0 { + if _, err := fmt.Fprint(w, " GROUP BY ", b.groupBy); err != nil { + return err + } + } + + if len(b.having) > 0 { + if _, err := fmt.Fprint(w, " HAVING ", b.having); err != nil { + return err + } + } + + if len(b.orderBy) > 0 { + if _, err := fmt.Fprint(w, " ORDER BY ", b.orderBy); err != nil { + return err + } + } + + if b.limitation != nil { + if err := b.limitWriteTo(w); err != nil { + return err + } + } + + return nil +} + +// OrderBy orderBy SQL +func (b *Builder) OrderBy(orderBy string) *Builder { + b.orderBy = orderBy + return b +} + +// GroupBy groupby SQL +func (b *Builder) GroupBy(groupby string) *Builder { + b.groupBy = groupby + return b +} + +// Having having SQL +func (b *Builder) Having(having string) *Builder { + b.having = having + return b +} diff --git a/vendor/xorm.io/builder/builder_set_operations.go b/vendor/xorm.io/builder/builder_set_operations.go new file mode 100644 index 0000000..b2b4a3d --- /dev/null +++ b/vendor/xorm.io/builder/builder_set_operations.go @@ -0,0 +1,51 @@ +// Copyright 2018 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
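The interplay between selectWriteTo and limitWriteTo above is easier to see with a concrete query. A hedged sketch (table and column names hypothetical; `Limit` is assumed to be the builder method, defined outside this excerpt, that fills `b.limitation`):

```go
package main

import (
	"fmt"

	"xorm.io/builder"
)

func main() {
	// SQLITE, MYSQL and POSTGRES keep the statement flat and append LIMIT/OFFSET.
	sql, args, err := builder.Dialect(builder.MYSQL).
		Select("date", "total").
		From("coronafana").
		Where(builder.Gte{"date": "2020-09-01"}).
		OrderBy("date DESC").
		Limit(10, 20). // limitN=10, offset=20
		ToSQL()
	if err != nil {
		panic(err)
	}
	fmt.Println(sql, args)
	// SELECT date,total FROM coronafana WHERE date>=? ORDER BY date DESC LIMIT 10 OFFSET 20 [2020-09-01]

	// With builder.MSSQL the same query is rewritten around TOP and
	// ROW_NUMBER() OVER (ORDER BY (SELECT 1)); with builder.ORACLE, around
	// nested ROWNUM sub-selects, as limitWriteTo shows. With no dialect set,
	// a limited query fails with ErrDialectNotSetUp.
}
```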
+ +package builder + +import ( + "fmt" + "strings" +) + +func (b *Builder) setOpWriteTo(w Writer) error { + if b.limitation != nil || b.cond.IsValid() || + b.orderBy != "" || b.having != "" || b.groupBy != "" { + return ErrNotUnexpectedUnionConditions + } + + for idx, o := range b.setOps { + current := o.builder + if current.optype != selectType { + return ErrUnsupportedUnionMembers + } + + if len(b.setOps) == 1 { + if err := current.selectWriteTo(w); err != nil { + return err + } + } else { + if b.dialect != "" && b.dialect != current.dialect { + return ErrInconsistentDialect + } + + if idx != 0 { + if o.distinctType == "" { + fmt.Fprint(w, fmt.Sprintf(" %s ", strings.ToUpper(o.opType))) + } else { + fmt.Fprint(w, fmt.Sprintf(" %s %s ", strings.ToUpper(o.opType), strings.ToUpper(o.distinctType))) + } + } + fmt.Fprint(w, "(") + + if err := current.selectWriteTo(w); err != nil { + return err + } + + fmt.Fprint(w, ")") + } + } + + return nil +} diff --git a/vendor/xorm.io/builder/builder_update.go b/vendor/xorm.io/builder/builder_update.go new file mode 100644 index 0000000..5fffbe3 --- /dev/null +++ b/vendor/xorm.io/builder/builder_update.go @@ -0,0 +1,57 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package builder + +import ( + "fmt" +) + +// UpdateCond defines an interface that cond could be used with update +type UpdateCond interface { + IsValid() bool + OpWriteTo(op string, w Writer) error +} + +// Update creates an update Builder +func Update(updates ...Cond) *Builder { + builder := &Builder{cond: NewCond()} + return builder.Update(updates...) +} + +func (b *Builder) updateWriteTo(w Writer) error { + if len(b.from) <= 0 { + return ErrNoTableName + } + if len(b.updates) <= 0 { + return ErrNoColumnToUpdate + } + + if _, err := fmt.Fprintf(w, "UPDATE %s SET ", b.from); err != nil { + return err + } + + for i, s := range b.updates { + + if err := s.OpWriteTo(",", w); err != nil { + return err + } + + if i != len(b.updates)-1 { + if _, err := fmt.Fprint(w, ","); err != nil { + return err + } + } + } + + if !b.cond.IsValid() { + return nil + } + + if _, err := fmt.Fprint(w, " WHERE "); err != nil { + return err + } + + return b.cond.WriteTo(w) +} diff --git a/vendor/xorm.io/builder/cond.go b/vendor/xorm.io/builder/cond.go new file mode 100644 index 0000000..149f5d8 --- /dev/null +++ b/vendor/xorm.io/builder/cond.go @@ -0,0 +1,38 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package builder + +// Cond defines an interface +type Cond interface { + WriteTo(Writer) error + And(...Cond) Cond + Or(...Cond) Cond + IsValid() bool +} + +type condEmpty struct{} + +var _ Cond = condEmpty{} + +// NewCond creates an empty condition +func NewCond() Cond { + return condEmpty{} +} + +func (condEmpty) WriteTo(w Writer) error { + return nil +} + +func (condEmpty) And(conds ...Cond) Cond { + return And(conds...) +} + +func (condEmpty) Or(conds ...Cond) Cond { + return Or(conds...) +} + +func (condEmpty) IsValid() bool { + return false +} diff --git a/vendor/xorm.io/builder/cond_and.go b/vendor/xorm.io/builder/cond_and.go new file mode 100644 index 0000000..e30bd18 --- /dev/null +++ b/vendor/xorm.io/builder/cond_and.go @@ -0,0 +1,61 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package builder
+
+import "fmt"
+
+type condAnd []Cond
+
+var _ Cond = condAnd{}
+
+// And generates AND conditions
+func And(conds ...Cond) Cond {
+ var result = make(condAnd, 0, len(conds))
+ for _, cond := range conds {
+ if cond == nil || !cond.IsValid() {
+ continue
+ }
+ result = append(result, cond)
+ }
+ return result
+}
+
+func (and condAnd) WriteTo(w Writer) error {
+ for i, cond := range and {
+ _, isOr := cond.(condOr)
+ _, isExpr := cond.(expr)
+ wrap := isOr || isExpr
+ if wrap {
+ fmt.Fprint(w, "(")
+ }
+
+ err := cond.WriteTo(w)
+ if err != nil {
+ return err
+ }
+
+ if wrap {
+ fmt.Fprint(w, ")")
+ }
+
+ if i != len(and)-1 {
+ fmt.Fprint(w, " AND ")
+ }
+ }
+
+ return nil
+}
+
+func (and condAnd) And(conds ...Cond) Cond {
+ return And(and, And(conds...))
+}
+
+func (and condAnd) Or(conds ...Cond) Cond {
+ return Or(and, Or(conds...))
+}
+
+func (and condAnd) IsValid() bool {
+ return len(and) > 0
+}
diff --git a/vendor/xorm.io/builder/cond_between.go b/vendor/xorm.io/builder/cond_between.go
new file mode 100644
index 0000000..10e0b83
--- /dev/null
+++ b/vendor/xorm.io/builder/cond_between.go
@@ -0,0 +1,65 @@
+// Copyright 2016 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package builder
+
+import "fmt"
+
+// Between implements the BETWEEN condition
+type Between struct {
+ Col string
+ LessVal interface{}
+ MoreVal interface{}
+}
+
+var _ Cond = Between{}
+
+// WriteTo writes data to Writer
+func (between Between) WriteTo(w Writer) error {
+ if _, err := fmt.Fprintf(w, "%s BETWEEN ", between.Col); err != nil {
+ return err
+ }
+ if lv, ok := between.LessVal.(expr); ok {
+ if err := lv.WriteTo(w); err != nil {
+ return err
+ }
+ } else {
+ if _, err := fmt.Fprint(w, "?"); err != nil {
+ return err
+ }
+ w.Append(between.LessVal)
+ }
+
+ if _, err := fmt.Fprint(w, " AND "); err != nil {
+ return err
+ }
+
+ if mv, ok := between.MoreVal.(expr); ok {
+ if err := mv.WriteTo(w); err != nil {
+ return err
+ }
+ } else {
+ if _, err := fmt.Fprint(w, "?"); err != nil {
+ return err
+ }
+ w.Append(between.MoreVal)
+ }
+
+ return nil
+}
+
+// And implements And with other conditions
+func (between Between) And(conds ...Cond) Cond {
+ return And(between, And(conds...))
+}
+
+// Or implements Or with other conditions
+func (between Between) Or(conds ...Cond) Cond {
+ return Or(between, Or(conds...))
+}
+
+// IsValid tests if the condition is valid
+func (between Between) IsValid() bool {
+ return len(between.Col) > 0
+}
diff --git a/vendor/xorm.io/builder/cond_compare.go b/vendor/xorm.io/builder/cond_compare.go
new file mode 100644
index 0000000..1c29371
--- /dev/null
+++ b/vendor/xorm.io/builder/cond_compare.go
@@ -0,0 +1,160 @@
+// Copyright 2016 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package builder
+
+import "fmt"
+
+// WriteMap writes conditions' SQL to Writer; op could be =, <>, >, <, <=, >=, etc.
+func WriteMap(w Writer, data map[string]interface{}, op string) error { + var args = make([]interface{}, 0, len(data)) + var i = 0 + keys := make([]string, 0, len(data)) + for k := range data { + keys = append(keys, k) + } + + for _, k := range keys { + v := data[k] + switch v.(type) { + case expr: + if _, err := fmt.Fprintf(w, "%s%s(", k, op); err != nil { + return err + } + + if err := v.(expr).WriteTo(w); err != nil { + return err + } + + if _, err := fmt.Fprintf(w, ")"); err != nil { + return err + } + case *Builder: + if _, err := fmt.Fprintf(w, "%s%s(", k, op); err != nil { + return err + } + + if err := v.(*Builder).WriteTo(w); err != nil { + return err + } + + if _, err := fmt.Fprintf(w, ")"); err != nil { + return err + } + default: + if _, err := fmt.Fprintf(w, "%s%s?", k, op); err != nil { + return err + } + args = append(args, v) + } + if i != len(data)-1 { + if _, err := fmt.Fprint(w, " AND "); err != nil { + return err + } + } + i = i + 1 + } + w.Append(args...) + return nil +} + +// Lt defines < condition +type Lt map[string]interface{} + +var _ Cond = Lt{} + +// WriteTo write SQL to Writer +func (lt Lt) WriteTo(w Writer) error { + return WriteMap(w, lt, "<") +} + +// And implements And with other conditions +func (lt Lt) And(conds ...Cond) Cond { + return condAnd{lt, And(conds...)} +} + +// Or implements Or with other conditions +func (lt Lt) Or(conds ...Cond) Cond { + return condOr{lt, Or(conds...)} +} + +// IsValid tests if this Eq is valid +func (lt Lt) IsValid() bool { + return len(lt) > 0 +} + +// Lte defines <= condition +type Lte map[string]interface{} + +var _ Cond = Lte{} + +// WriteTo write SQL to Writer +func (lte Lte) WriteTo(w Writer) error { + return WriteMap(w, lte, "<=") +} + +// And implements And with other conditions +func (lte Lte) And(conds ...Cond) Cond { + return And(lte, And(conds...)) +} + +// Or implements Or with other conditions +func (lte Lte) Or(conds ...Cond) Cond { + return Or(lte, Or(conds...)) +} + +// IsValid tests if this Eq is valid +func (lte Lte) IsValid() bool { + return len(lte) > 0 +} + +// Gt defines > condition +type Gt map[string]interface{} + +var _ Cond = Gt{} + +// WriteTo write SQL to Writer +func (gt Gt) WriteTo(w Writer) error { + return WriteMap(w, gt, ">") +} + +// And implements And with other conditions +func (gt Gt) And(conds ...Cond) Cond { + return And(gt, And(conds...)) +} + +// Or implements Or with other conditions +func (gt Gt) Or(conds ...Cond) Cond { + return Or(gt, Or(conds...)) +} + +// IsValid tests if this Eq is valid +func (gt Gt) IsValid() bool { + return len(gt) > 0 +} + +// Gte defines >= condition +type Gte map[string]interface{} + +var _ Cond = Gte{} + +// WriteTo write SQL to Writer +func (gte Gte) WriteTo(w Writer) error { + return WriteMap(w, gte, ">=") +} + +// And implements And with other conditions +func (gte Gte) And(conds ...Cond) Cond { + return And(gte, And(conds...)) +} + +// Or implements Or with other conditions +func (gte Gte) Or(conds ...Cond) Cond { + return Or(gte, Or(conds...)) +} + +// IsValid tests if this Eq is valid +func (gte Gte) IsValid() bool { + return len(gte) > 0 +} diff --git a/vendor/xorm.io/builder/cond_eq.go b/vendor/xorm.io/builder/cond_eq.go new file mode 100644 index 0000000..9976d18 --- /dev/null +++ b/vendor/xorm.io/builder/cond_eq.go @@ -0,0 +1,117 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
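A small usage sketch for the comparison conditions above (column names hypothetical). Note that in this snapshot WriteMap collects the map keys without sorting them, so a multi-key Gt/Gte/Lt/Lte renders its clauses in nondeterministic order; single-key maps, as below, avoid that:

```go
package main

import (
	"fmt"

	"xorm.io/builder"
)

func main() {
	// Gte/Lt are map types; And() glues conditions with " AND ".
	cond := builder.Gte{"total": 100}.And(builder.Lt{"date": "2020-10-01"})
	sql, args, err := builder.ToSQL(cond)
	if err != nil {
		panic(err)
	}
	fmt.Println(sql, args) // total>=? AND date<? [100 2020-10-01]
}
```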
+ +package builder + +import ( + "fmt" + "sort" +) + +// Incr implements a type used by Eq +type Incr int + +// Decr implements a type used by Eq +type Decr int + +// Eq defines equals conditions +type Eq map[string]interface{} + +var _ Cond = Eq{} + +// OpWriteTo writes conditions with special operator +func (eq Eq) OpWriteTo(op string, w Writer) error { + var i = 0 + for _, k := range eq.sortedKeys() { + v := eq[k] + switch v.(type) { + case []int, []int64, []string, []int32, []int16, []int8, []uint, []uint64, []uint32, []uint16, []interface{}: + if err := In(k, v).WriteTo(w); err != nil { + return err + } + case expr: + if _, err := fmt.Fprintf(w, "%s=(", k); err != nil { + return err + } + + if err := v.(expr).WriteTo(w); err != nil { + return err + } + + if _, err := fmt.Fprintf(w, ")"); err != nil { + return err + } + case *Builder: + if _, err := fmt.Fprintf(w, "%s=(", k); err != nil { + return err + } + + if err := v.(*Builder).WriteTo(w); err != nil { + return err + } + + if _, err := fmt.Fprintf(w, ")"); err != nil { + return err + } + case Incr: + if _, err := fmt.Fprintf(w, "%s=%s+?", k, k); err != nil { + return err + } + w.Append(int(v.(Incr))) + case Decr: + if _, err := fmt.Fprintf(w, "%s=%s-?", k, k); err != nil { + return err + } + w.Append(int(v.(Decr))) + case nil: + if _, err := fmt.Fprintf(w, "%s=null", k); err != nil { + return err + } + default: + if _, err := fmt.Fprintf(w, "%s=?", k); err != nil { + return err + } + w.Append(v) + } + if i != len(eq)-1 { + if _, err := fmt.Fprint(w, op); err != nil { + return err + } + } + i = i + 1 + } + return nil +} + +// WriteTo writes SQL to Writer +func (eq Eq) WriteTo(w Writer) error { + return eq.OpWriteTo(" AND ", w) +} + +// And implements And with other conditions +func (eq Eq) And(conds ...Cond) Cond { + return And(eq, And(conds...)) +} + +// Or implements Or with other conditions +func (eq Eq) Or(conds ...Cond) Cond { + return Or(eq, Or(conds...)) +} + +// IsValid tests if this Eq is valid +func (eq Eq) IsValid() bool { + return len(eq) > 0 +} + +// sortedKeys returns all keys of this Eq sorted with sort.Strings. +// It is used internally for consistent ordering when generating +// SQL, see https://gitea.com/xorm/builder/issues/10 +func (eq Eq) sortedKeys() []string { + keys := make([]string, 0, len(eq)) + for key := range eq { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} diff --git a/vendor/xorm.io/builder/cond_expr.go b/vendor/xorm.io/builder/cond_expr.go new file mode 100644 index 0000000..8288aa0 --- /dev/null +++ b/vendor/xorm.io/builder/cond_expr.go @@ -0,0 +1,43 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package builder + +import "fmt" + +type expr struct { + sql string + args []interface{} +} + +var _ Cond = expr{} + +// Expr generate customerize SQL +func Expr(sql string, args ...interface{}) Cond { + return expr{sql, args} +} + +func (expr expr) OpWriteTo(op string, w Writer) error { + return expr.WriteTo(w) +} + +func (expr expr) WriteTo(w Writer) error { + if _, err := fmt.Fprint(w, expr.sql); err != nil { + return err + } + w.Append(expr.args...) 
+ return nil +} + +func (expr expr) And(conds ...Cond) Cond { + return And(expr, And(conds...)) +} + +func (expr expr) Or(conds ...Cond) Cond { + return Or(expr, Or(conds...)) +} + +func (expr expr) IsValid() bool { + return len(expr.sql) > 0 +} diff --git a/vendor/xorm.io/builder/cond_if.go b/vendor/xorm.io/builder/cond_if.go new file mode 100644 index 0000000..af9eb32 --- /dev/null +++ b/vendor/xorm.io/builder/cond_if.go @@ -0,0 +1,49 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package builder + +type condIf struct { + condition bool + condTrue Cond + condFalse Cond +} + +var _ Cond = condIf{} + +// If returns Cond via condition +func If(condition bool, condTrue Cond, condFalse ...Cond) Cond { + var c = condIf{ + condition: condition, + condTrue: condTrue, + } + if len(condFalse) > 0 { + c.condFalse = condFalse[0] + } + return c +} + +func (condIf condIf) WriteTo(w Writer) error { + if condIf.condition { + return condIf.condTrue.WriteTo(w) + } else if condIf.condFalse != nil { + return condIf.condFalse.WriteTo(w) + } + return nil +} + +func (condIf condIf) And(conds ...Cond) Cond { + return And(condIf, And(conds...)) +} + +func (condIf condIf) Or(conds ...Cond) Cond { + return Or(condIf, Or(conds...)) +} + +func (condIf condIf) IsValid() bool { + if condIf.condition { + return condIf.condTrue != nil + } + return condIf.condFalse != nil +} diff --git a/vendor/xorm.io/builder/cond_in.go b/vendor/xorm.io/builder/cond_in.go new file mode 100644 index 0000000..f6366d3 --- /dev/null +++ b/vendor/xorm.io/builder/cond_in.go @@ -0,0 +1,237 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
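condIf above is handy for optional filters: an If whose condition is false and whose else-branch is absent reports IsValid() == false, so And() silently drops it. A hedged sketch (the `dep` filter and column names are hypothetical):

```go
package main

import (
	"fmt"

	"xorm.io/builder"
)

func main() {
	dep := "" // e.g. an optional HTTP query parameter; empty means "no filter"
	cond := builder.And(
		builder.Gte{"date": "2020-09-01"},
		builder.If(dep != "", builder.Eq{"dep": dep}), // invalid when dep == "", so And drops it
	)
	sql, args, _ := builder.ToSQL(cond)
	fmt.Println(sql, args) // date>=? [2020-09-01] — the If branch vanished
}
```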
+ +package builder + +import ( + "fmt" + "reflect" + "strings" +) + +type condIn struct { + col string + vals []interface{} +} + +var _ Cond = condIn{} + +// In generates IN condition +func In(col string, values ...interface{}) Cond { + return condIn{col, values} +} + +func (condIn condIn) handleBlank(w Writer) error { + _, err := fmt.Fprint(w, "0=1") + return err +} + +func (condIn condIn) WriteTo(w Writer) error { + if len(condIn.vals) <= 0 { + return condIn.handleBlank(w) + } + + switch condIn.vals[0].(type) { + case []int8: + vals := condIn.vals[0].([]int8) + if len(vals) <= 0 { + return condIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + for _, val := range vals { + w.Append(val) + } + case []int16: + vals := condIn.vals[0].([]int16) + if len(vals) <= 0 { + return condIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + for _, val := range vals { + w.Append(val) + } + case []int: + vals := condIn.vals[0].([]int) + if len(vals) <= 0 { + return condIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + for _, val := range vals { + w.Append(val) + } + case []int32: + vals := condIn.vals[0].([]int32) + if len(vals) <= 0 { + return condIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + for _, val := range vals { + w.Append(val) + } + case []int64: + vals := condIn.vals[0].([]int64) + if len(vals) <= 0 { + return condIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + for _, val := range vals { + w.Append(val) + } + case []uint8: + vals := condIn.vals[0].([]uint8) + if len(vals) <= 0 { + return condIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + for _, val := range vals { + w.Append(val) + } + case []uint16: + vals := condIn.vals[0].([]uint16) + if len(vals) <= 0 { + return condIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + for _, val := range vals { + w.Append(val) + } + case []uint: + vals := condIn.vals[0].([]uint) + if len(vals) <= 0 { + return condIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + for _, val := range vals { + w.Append(val) + } + case []uint32: + vals := condIn.vals[0].([]uint32) + if len(vals) <= 0 { + return condIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + for _, val := range vals { + w.Append(val) + } + case []uint64: + vals := condIn.vals[0].([]uint64) + if len(vals) 
<= 0 { + return condIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + for _, val := range vals { + w.Append(val) + } + case []string: + vals := condIn.vals[0].([]string) + if len(vals) <= 0 { + return condIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + for _, val := range vals { + w.Append(val) + } + case []interface{}: + vals := condIn.vals[0].([]interface{}) + if len(vals) <= 0 { + return condIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + w.Append(vals...) + case expr: + val := condIn.vals[0].(expr) + if _, err := fmt.Fprintf(w, "%s IN (", condIn.col); err != nil { + return err + } + if err := val.WriteTo(w); err != nil { + return err + } + if _, err := fmt.Fprintf(w, ")"); err != nil { + return err + } + case *Builder: + bd := condIn.vals[0].(*Builder) + if _, err := fmt.Fprintf(w, "%s IN (", condIn.col); err != nil { + return err + } + if err := bd.WriteTo(w); err != nil { + return err + } + if _, err := fmt.Fprintf(w, ")"); err != nil { + return err + } + default: + v := reflect.ValueOf(condIn.vals[0]) + if v.Kind() == reflect.Slice { + l := v.Len() + if l == 0 { + return condIn.handleBlank(w) + } + + questionMark := strings.Repeat("?,", l) + if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + + for i := 0; i < l; i++ { + w.Append(v.Index(i).Interface()) + } + } else { + questionMark := strings.Repeat("?,", len(condIn.vals)) + if _, err := fmt.Fprintf(w, "%s IN (%s)", condIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + w.Append(condIn.vals...) + } + } + return nil +} + +func (condIn condIn) And(conds ...Cond) Cond { + return And(condIn, And(conds...)) +} + +func (condIn condIn) Or(conds ...Cond) Cond { + return Or(condIn, Or(conds...)) +} + +func (condIn condIn) IsValid() bool { + return len(condIn.col) > 0 && len(condIn.vals) > 0 +} diff --git a/vendor/xorm.io/builder/cond_like.go b/vendor/xorm.io/builder/cond_like.go new file mode 100644 index 0000000..e34202f --- /dev/null +++ b/vendor/xorm.io/builder/cond_like.go @@ -0,0 +1,41 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package builder + +import "fmt" + +// Like defines like condition +type Like [2]string + +var _ Cond = Like{"", ""} + +// WriteTo write SQL to Writer +func (like Like) WriteTo(w Writer) error { + if _, err := fmt.Fprintf(w, "%s LIKE ?", like[0]); err != nil { + return err + } + // FIXME: if use other regular express, this will be failed. 
but for compatible, keep this + if like[1][0] == '%' || like[1][len(like[1])-1] == '%' { + w.Append(like[1]) + } else { + w.Append("%" + like[1] + "%") + } + return nil +} + +// And implements And with other conditions +func (like Like) And(conds ...Cond) Cond { + return And(like, And(conds...)) +} + +// Or implements Or with other conditions +func (like Like) Or(conds ...Cond) Cond { + return Or(like, Or(conds...)) +} + +// IsValid tests if this condition is valid +func (like Like) IsValid() bool { + return len(like[0]) > 0 && len(like[1]) > 0 +} diff --git a/vendor/xorm.io/builder/cond_neq.go b/vendor/xorm.io/builder/cond_neq.go new file mode 100644 index 0000000..687c59f --- /dev/null +++ b/vendor/xorm.io/builder/cond_neq.go @@ -0,0 +1,94 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package builder + +import ( + "fmt" + "sort" +) + +// Neq defines not equal conditions +type Neq map[string]interface{} + +var _ Cond = Neq{} + +// WriteTo writes SQL to Writer +func (neq Neq) WriteTo(w Writer) error { + var args = make([]interface{}, 0, len(neq)) + var i = 0 + for _, k := range neq.sortedKeys() { + v := neq[k] + switch v.(type) { + case []int, []int64, []string, []int32, []int16, []int8: + if err := NotIn(k, v).WriteTo(w); err != nil { + return err + } + case expr: + if _, err := fmt.Fprintf(w, "%s<>(", k); err != nil { + return err + } + + if err := v.(expr).WriteTo(w); err != nil { + return err + } + + if _, err := fmt.Fprintf(w, ")"); err != nil { + return err + } + case *Builder: + if _, err := fmt.Fprintf(w, "%s<>(", k); err != nil { + return err + } + + if err := v.(*Builder).WriteTo(w); err != nil { + return err + } + + if _, err := fmt.Fprintf(w, ")"); err != nil { + return err + } + default: + if _, err := fmt.Fprintf(w, "%s<>?", k); err != nil { + return err + } + args = append(args, v) + } + if i != len(neq)-1 { + if _, err := fmt.Fprint(w, " AND "); err != nil { + return err + } + } + i = i + 1 + } + w.Append(args...) + return nil +} + +// And implements And with other conditions +func (neq Neq) And(conds ...Cond) Cond { + return And(neq, And(conds...)) +} + +// Or implements Or with other conditions +func (neq Neq) Or(conds ...Cond) Cond { + return Or(neq, Or(conds...)) +} + +// IsValid tests if this condition is valid +func (neq Neq) IsValid() bool { + return len(neq) > 0 +} + +// sortedKeys returns all keys of this Neq sorted with sort.Strings. +// It is used internally for consistent ordering when generating +// SQL, see https://gitea.com/xorm/builder/issues/10 +func (neq Neq) sortedKeys() []string { + keys := make([]string, 0, len(neq)) + for key := range neq { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} diff --git a/vendor/xorm.io/builder/cond_not.go b/vendor/xorm.io/builder/cond_not.go new file mode 100644 index 0000000..667dfe7 --- /dev/null +++ b/vendor/xorm.io/builder/cond_not.go @@ -0,0 +1,77 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
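The FIXME in cond_like.go above is worth illustrating: Like only auto-wraps the pattern in % when the value neither starts nor ends with one. A small sketch (column names hypothetical):

```go
package main

import (
	"fmt"

	"xorm.io/builder"
)

func main() {
	sql, args, _ := builder.ToSQL(builder.Like{"nom", "haute"})
	fmt.Println(sql, args) // nom LIKE ? [%haute%]

	sql, args, _ = builder.ToSQL(builder.Like{"nom", "haute%"})
	fmt.Println(sql, args) // nom LIKE ? [haute%] — passed through unchanged

	sql, args, _ = builder.ToSQL(builder.Neq{"total": 0})
	fmt.Println(sql, args) // total<>? [0]
}
```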
+ +package builder + +import "fmt" + +// Not defines NOT condition +type Not [1]Cond + +var _ Cond = Not{} + +// WriteTo writes SQL to Writer +func (not Not) WriteTo(w Writer) error { + if _, err := fmt.Fprint(w, "NOT "); err != nil { + return err + } + switch not[0].(type) { + case condAnd, condOr: + if _, err := fmt.Fprint(w, "("); err != nil { + return err + } + case Eq: + if len(not[0].(Eq)) > 1 { + if _, err := fmt.Fprint(w, "("); err != nil { + return err + } + } + case Neq: + if len(not[0].(Neq)) > 1 { + if _, err := fmt.Fprint(w, "("); err != nil { + return err + } + } + } + + if err := not[0].WriteTo(w); err != nil { + return err + } + + switch not[0].(type) { + case condAnd, condOr: + if _, err := fmt.Fprint(w, ")"); err != nil { + return err + } + case Eq: + if len(not[0].(Eq)) > 1 { + if _, err := fmt.Fprint(w, ")"); err != nil { + return err + } + } + case Neq: + if len(not[0].(Neq)) > 1 { + if _, err := fmt.Fprint(w, ")"); err != nil { + return err + } + } + } + + return nil +} + +// And implements And with other conditions +func (not Not) And(conds ...Cond) Cond { + return And(not, And(conds...)) +} + +// Or implements Or with other conditions +func (not Not) Or(conds ...Cond) Cond { + return Or(not, Or(conds...)) +} + +// IsValid tests if this condition is valid +func (not Not) IsValid() bool { + return not[0] != nil && not[0].IsValid() +} diff --git a/vendor/xorm.io/builder/cond_notin.go b/vendor/xorm.io/builder/cond_notin.go new file mode 100644 index 0000000..dc3ac49 --- /dev/null +++ b/vendor/xorm.io/builder/cond_notin.go @@ -0,0 +1,234 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package builder + +import ( + "fmt" + "reflect" + "strings" +) + +type condNotIn condIn + +var _ Cond = condNotIn{} + +// NotIn generate NOT IN condition +func NotIn(col string, values ...interface{}) Cond { + return condNotIn{col, values} +} + +func (condNotIn condNotIn) handleBlank(w Writer) error { + _, err := fmt.Fprint(w, "0=0") + return err +} + +func (condNotIn condNotIn) WriteTo(w Writer) error { + if len(condNotIn.vals) <= 0 { + return condNotIn.handleBlank(w) + } + + switch condNotIn.vals[0].(type) { + case []int8: + vals := condNotIn.vals[0].([]int8) + if len(vals) <= 0 { + return condNotIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + for _, val := range vals { + w.Append(val) + } + case []int16: + vals := condNotIn.vals[0].([]int16) + if len(vals) <= 0 { + return condNotIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + for _, val := range vals { + w.Append(val) + } + case []int: + vals := condNotIn.vals[0].([]int) + if len(vals) <= 0 { + return condNotIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + for _, val := range vals { + w.Append(val) + } + case []int32: + vals := condNotIn.vals[0].([]int32) + if len(vals) <= 0 { + return condNotIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, 
questionMark[:len(questionMark)-1]); err != nil { + return err + } + for _, val := range vals { + w.Append(val) + } + case []int64: + vals := condNotIn.vals[0].([]int64) + if len(vals) <= 0 { + return condNotIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + for _, val := range vals { + w.Append(val) + } + case []uint8: + vals := condNotIn.vals[0].([]uint8) + if len(vals) <= 0 { + return condNotIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + for _, val := range vals { + w.Append(val) + } + case []uint16: + vals := condNotIn.vals[0].([]uint16) + if len(vals) <= 0 { + return condNotIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + for _, val := range vals { + w.Append(val) + } + case []uint: + vals := condNotIn.vals[0].([]uint) + if len(vals) <= 0 { + return condNotIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + for _, val := range vals { + w.Append(val) + } + case []uint32: + vals := condNotIn.vals[0].([]uint32) + if len(vals) <= 0 { + return condNotIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + for _, val := range vals { + w.Append(val) + } + case []uint64: + vals := condNotIn.vals[0].([]uint64) + if len(vals) <= 0 { + return condNotIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + for _, val := range vals { + w.Append(val) + } + case []string: + vals := condNotIn.vals[0].([]string) + if len(vals) <= 0 { + return condNotIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + for _, val := range vals { + w.Append(val) + } + case []interface{}: + vals := condNotIn.vals[0].([]interface{}) + if len(vals) <= 0 { + return condNotIn.handleBlank(w) + } + questionMark := strings.Repeat("?,", len(vals)) + if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + w.Append(vals...) 
+ case expr: + val := condNotIn.vals[0].(expr) + if _, err := fmt.Fprintf(w, "%s NOT IN (", condNotIn.col); err != nil { + return err + } + if err := val.WriteTo(w); err != nil { + return err + } + if _, err := fmt.Fprintf(w, ")"); err != nil { + return err + } + case *Builder: + val := condNotIn.vals[0].(*Builder) + if _, err := fmt.Fprintf(w, "%s NOT IN (", condNotIn.col); err != nil { + return err + } + if err := val.WriteTo(w); err != nil { + return err + } + if _, err := fmt.Fprintf(w, ")"); err != nil { + return err + } + default: + v := reflect.ValueOf(condNotIn.vals[0]) + if v.Kind() == reflect.Slice { + l := v.Len() + if l == 0 { + return condNotIn.handleBlank(w) + } + + questionMark := strings.Repeat("?,", l) + if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + + for i := 0; i < l; i++ { + w.Append(v.Index(i).Interface()) + } + } else { + questionMark := strings.Repeat("?,", len(condNotIn.vals)) + if _, err := fmt.Fprintf(w, "%s NOT IN (%s)", condNotIn.col, questionMark[:len(questionMark)-1]); err != nil { + return err + } + w.Append(condNotIn.vals...) + } + } + return nil +} + +func (condNotIn condNotIn) And(conds ...Cond) Cond { + return And(condNotIn, And(conds...)) +} + +func (condNotIn condNotIn) Or(conds ...Cond) Cond { + return Or(condNotIn, Or(conds...)) +} + +func (condNotIn condNotIn) IsValid() bool { + return len(condNotIn.col) > 0 && len(condNotIn.vals) > 0 +} diff --git a/vendor/xorm.io/builder/cond_null.go b/vendor/xorm.io/builder/cond_null.go new file mode 100644 index 0000000..bf2aaf8 --- /dev/null +++ b/vendor/xorm.io/builder/cond_null.go @@ -0,0 +1,59 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package builder + +import "fmt" + +// IsNull defines IS NULL condition +type IsNull [1]string + +var _ Cond = IsNull{""} + +// WriteTo write SQL to Writer +func (isNull IsNull) WriteTo(w Writer) error { + _, err := fmt.Fprintf(w, "%s IS NULL", isNull[0]) + return err +} + +// And implements And with other conditions +func (isNull IsNull) And(conds ...Cond) Cond { + return And(isNull, And(conds...)) +} + +// Or implements Or with other conditions +func (isNull IsNull) Or(conds ...Cond) Cond { + return Or(isNull, Or(conds...)) +} + +// IsValid tests if this condition is valid +func (isNull IsNull) IsValid() bool { + return len(isNull[0]) > 0 +} + +// NotNull defines NOT NULL condition +type NotNull [1]string + +var _ Cond = NotNull{""} + +// WriteTo write SQL to Writer +func (notNull NotNull) WriteTo(w Writer) error { + _, err := fmt.Fprintf(w, "%s IS NOT NULL", notNull[0]) + return err +} + +// And implements And with other conditions +func (notNull NotNull) And(conds ...Cond) Cond { + return And(notNull, And(conds...)) +} + +// Or implements Or with other conditions +func (notNull NotNull) Or(conds ...Cond) Cond { + return Or(notNull, Or(conds...)) +} + +// IsValid tests if this condition is valid +func (notNull NotNull) IsValid() bool { + return len(notNull[0]) > 0 +} diff --git a/vendor/xorm.io/builder/cond_or.go b/vendor/xorm.io/builder/cond_or.go new file mode 100644 index 0000000..5244265 --- /dev/null +++ b/vendor/xorm.io/builder/cond_or.go @@ -0,0 +1,69 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
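One behavior of condIn/condNotIn above deserves a call-out: empty slices degrade to constant predicates instead of emitting invalid `IN ()` SQL. A hedged sketch (column names hypothetical):

```go
package main

import (
	"fmt"

	"xorm.io/builder"
)

func main() {
	cond := builder.In("dep", []string{"31", "34"}).And(builder.NotNull{"total"})
	sql, args, _ := builder.ToSQL(cond)
	fmt.Println(sql, args) // dep IN (?,?) AND total IS NOT NULL [31 34]

	sql, _, _ = builder.ToSQL(builder.In("dep", []string{}))
	fmt.Println(sql) // 0=1 — matches nothing, but the query stays valid

	sql, _, _ = builder.ToSQL(builder.NotIn("dep", []string{}))
	fmt.Println(sql) // 0=0 — matches everything
}
```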
+
+package builder
+
+import "fmt"
+
+type condOr []Cond
+
+var _ Cond = condOr{}
+
+// Or sets OR conditions
+func Or(conds ...Cond) Cond {
+ var result = make(condOr, 0, len(conds))
+ for _, cond := range conds {
+ if cond == nil || !cond.IsValid() {
+ continue
+ }
+ result = append(result, cond)
+ }
+ return result
+}
+
+// WriteTo implements Cond
+func (o condOr) WriteTo(w Writer) error {
+ for i, cond := range o {
+ var needQuote bool
+ switch cond.(type) {
+ case condAnd, expr:
+ needQuote = true
+ case Eq:
+ needQuote = (len(cond.(Eq)) > 1)
+ case Neq:
+ needQuote = (len(cond.(Neq)) > 1)
+ }
+
+ if needQuote {
+ fmt.Fprint(w, "(")
+ }
+
+ err := cond.WriteTo(w)
+ if err != nil {
+ return err
+ }
+
+ if needQuote {
+ fmt.Fprint(w, ")")
+ }
+
+ if i != len(o)-1 {
+ fmt.Fprint(w, " OR ")
+ }
+ }
+
+ return nil
+}
+
+func (o condOr) And(conds ...Cond) Cond {
+ return And(o, And(conds...))
+}
+
+func (o condOr) Or(conds ...Cond) Cond {
+ return Or(o, Or(conds...))
+}
+
+func (o condOr) IsValid() bool {
+ return len(o) > 0
+}
diff --git a/vendor/xorm.io/builder/doc.go b/vendor/xorm.io/builder/doc.go
new file mode 100644
index 0000000..6e7dd45
--- /dev/null
+++ b/vendor/xorm.io/builder/doc.go
@@ -0,0 +1,120 @@
+// Copyright 2016 The XORM Authors. All rights reserved.
+// Use of this source code is governed by a BSD
+// license that can be found in the LICENSE file.
+
+/*
+
+Package builder is a simple and powerful sql builder for Go.
+
+Make sure you have installed Go 1.1+ and then:
+
+ go get xorm.io/builder
+
+WARNING: Currently, only query conditions are supported. Below are the supported conditions.
+
+1. Eq is a redefined map type; you can give one or more conditions to Eq
+
+ import . "xorm.io/builder"
+
+ sql, args, _ := ToSQL(Eq{"a":1})
+ // a=? [1]
+ sql, args, _ := ToSQL(Eq{"b":"c"}.And(Eq{"c": 0}))
+ // b=? AND c=? ["c", 0]
+ sql, args, _ := ToSQL(Eq{"b":"c", "c":0})
+ // b=? AND c=? ["c", 0]
+ sql, args, _ := ToSQL(Eq{"b":"c"}.Or(Eq{"b":"d"}))
+ // b=? OR b=? ["c", "d"]
+ sql, args, _ := ToSQL(Eq{"b": []string{"c", "d"}})
+ // b IN (?,?) ["c", "d"]
+ sql, args, _ := ToSQL(Eq{"b": 1, "c":[]int{2, 3}})
+ // b=? AND c IN (?,?) [1, 2, 3]
+
+2. Neq works the same way as Eq
+
+ import . "xorm.io/builder"
+
+ sql, args, _ := ToSQL(Neq{"a":1})
+ // a<>? [1]
+ sql, args, _ := ToSQL(Neq{"b":"c"}.And(Neq{"c": 0}))
+ // b<>? AND c<>? ["c", 0]
+ sql, args, _ := ToSQL(Neq{"b":"c", "c":0})
+ // b<>? AND c<>? ["c", 0]
+ sql, args, _ := ToSQL(Neq{"b":"c"}.Or(Neq{"b":"d"}))
+ // b<>? OR b<>? ["c", "d"]
+ sql, args, _ := ToSQL(Neq{"b": []string{"c", "d"}})
+ // b NOT IN (?,?) ["c", "d"]
+ sql, args, _ := ToSQL(Neq{"b": 1, "c":[]int{2, 3}})
+ // b<>? AND c NOT IN (?,?) [1, 2, 3]
+
+3. Gt, Gte, Lt, Lte
+
+ import . "xorm.io/builder"
+
+ sql, args, _ := ToSQL(Gt{"a": 1}.And(Gte{"b": 2}))
+ // a>? AND b>=? [1, 2]
+ sql, args, _ := ToSQL(Lt{"a": 1}.Or(Lte{"b": 2}))
+ // a<? OR b<=? [1, 2]
+
+4. Like
+
+ import . "xorm.io/builder"
+
+ sql, args, _ := ToSQL(Like{"a", "c"})
+ // a LIKE ? [%c%]
+
+5. Expr, for hand-written SQL fragments
+
+ import . "xorm.io/builder"
+
+ sql, args, _ := ToSQL(Expr("a = ? ", 1))
+ // a = ? [1]
+ sql, args, _ := ToSQL(Eq{"a": Expr("select id from table where c = ?", 1)})
+ // a=(select id from table where c = ?) [1]
+
+6. In and NotIn
+
+ import . "xorm.io/builder"
+
+ sql, args, _ := ToSQL(In("a", 1, 2, 3))
+ // a IN (?,?,?) [1,2,3]
+ sql, args, _ := ToSQL(In("a", []int{1, 2, 3}))
+ // a IN (?,?,?) [1,2,3]
+ sql, args, _ := ToSQL(In("a", Expr("select id from b where c = ?", 1)))
+ // a IN (select id from b where c = ?) [1]
+
+7. IsNull and NotNull
+
+ import . "xorm.io/builder"
+
+ sql, args, _ := ToSQL(IsNull{"a"})
+ // a IS NULL []
+ sql, args, _ := ToSQL(NotNull{"b"})
+ // b IS NOT NULL []
+
+8. And(conds ...Cond), And can connect one or more conditions via And
+
+ import . "xorm.io/builder"
+
+ sql, args, _ := ToSQL(And(Eq{"a":1}, Like{"b", "c"}, Neq{"d": 2}))
+ // a=? AND b LIKE ? AND d<>? [1, %c%, 2]
+
+9. Or(conds ...Cond), Or can connect one or more conditions via Or
+
+ import . "xorm.io/builder"
+
+ sql, args, _ := ToSQL(Or(Eq{"a":1}, Like{"b", "c"}, Neq{"d": 2}))
+ // a=? OR b LIKE ? OR d<>? [1, %c%, 2]
+ sql, args, _ := ToSQL(Or(Eq{"a":1}, And(Like{"b", "c"}, Neq{"d": 2})))
+ // a=? OR (b LIKE ? AND d<>?) [1, %c%, 2]
+
+10. Between
+
+ import . "xorm.io/builder"
+
+ sql, args, _ := ToSQL(Between{"a", 1, 2})
+ // a BETWEEN ? AND ? [1, 2]
+
+11. Define your own conditions
+Since Cond is an interface, you can define your own conditions and combine them with the built-in ones
+*/
+package builder
diff --git a/vendor/xorm.io/builder/error.go b/vendor/xorm.io/builder/error.go
new file mode 100644
index 0000000..b0ded29
--- /dev/null
+++ b/vendor/xorm.io/builder/error.go
@@ -0,0 +1,40 @@
+// Copyright 2016 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package builder
+
+import "errors"
+
+var (
+ // ErrNotSupportType not supported SQL type error
+ ErrNotSupportType = errors.New("Not supported SQL type")
+ // ErrNoNotInConditions no NOT IN params error
+ ErrNoNotInConditions = errors.New("No NOT IN conditions")
+ // ErrNoInConditions no IN params error
+ ErrNoInConditions = errors.New("No IN conditions")
+ // ErrNeedMoreArguments need more arguments
+ ErrNeedMoreArguments = errors.New("Need more sql arguments")
+ // ErrNoTableName no table name
+ ErrNoTableName = errors.New("No table indicated")
+ // ErrNoColumnToUpdate no column to update
+ ErrNoColumnToUpdate = errors.New("No column(s) to update")
+ // ErrNoColumnToInsert no column to insert
+ ErrNoColumnToInsert = errors.New("No column(s) to insert")
+ // ErrNotSupportDialectType not supported dialect type error
+ ErrNotSupportDialectType = errors.New("Not supported dialect type")
+ // ErrNotUnexpectedUnionConditions using union in a wrong way
+ ErrNotUnexpectedUnionConditions = errors.New("Unexpected conditional fields in UNION query")
+ // ErrUnsupportedUnionMembers unexpected members in UNION query
+ ErrUnsupportedUnionMembers = errors.New("Unexpected members in UNION query")
+ // ErrUnexpectedSubQuery Unexpected sub-query in SELECT query
+ ErrUnexpectedSubQuery = errors.New("Unexpected sub-query in SELECT query")
+ // ErrDialectNotSetUp dialect is not setup yet
+ ErrDialectNotSetUp = errors.New("Dialect is not setup yet, try to use `Dialect(dbType)` at first")
+ // ErrInvalidLimitation offset or limit is not correct
+ ErrInvalidLimitation = errors.New("Offset or limit is not correct")
+ // ErrUnnamedDerivedTable Every derived table must have its own alias
+ ErrUnnamedDerivedTable = errors.New("Every derived table must have its own alias")
+ // ErrInconsistentDialect Inconsistent dialect in same builder
+ ErrInconsistentDialect = errors.New("Inconsistent dialect in same builder")
+)
diff --git a/vendor/xorm.io/builder/go.mod b/vendor/xorm.io/builder/go.mod
new file mode 100644
index 0000000..620f943
--- /dev/null
+++ b/vendor/xorm.io/builder/go.mod
@@ -0,0 +1,8 @@
+module xorm.io/builder
+
+go 1.11
+
+require (
+ gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a
+ github.com/stretchr/testify v1.3.0
+)
diff --git a/vendor/xorm.io/builder/go.sum b/vendor/xorm.io/builder/go.sum
new file mode 100644
index 0000000..a5727ca
--- /dev/null
+++ b/vendor/xorm.io/builder/go.sum
@@ -0,0 +1,9 @@
+gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:lSA0F4e9A2NcQSqGqTOXqu2aRi/XEQxDCBwM8yJtE6s=
+gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:EXuID2Zs0pAQhH8yz+DNjUbjppKQzKFAn28TMYPB6IU=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/xorm.io/builder/sql.go b/vendor/xorm.io/builder/sql.go new file mode 100644 index 0000000..a6d1066 --- /dev/null +++ b/vendor/xorm.io/builder/sql.go @@ -0,0 +1,165 @@ +// Copyright 2018 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package builder + +import ( + sql2 "database/sql" + "fmt" + "reflect" + "strings" + "time" +) + +func condToSQL(cond Cond) (string, []interface{}, error) { + if cond == nil || !cond.IsValid() { + return "", nil, nil + } + + w := NewWriter() + if err := cond.WriteTo(w); err != nil { + return "", nil, err + } + return w.String(), w.args, nil +} + +func condToBoundSQL(cond Cond) (string, error) { + if cond == nil || !cond.IsValid() { + return "", nil + } + + w := NewWriter() + if err := cond.WriteTo(w); err != nil { + return "", err + } + return ConvertToBoundSQL(w.String(), w.args) +} + +// ToSQL convert a builder or conditions to SQL and args +func ToSQL(cond interface{}) (string, []interface{}, error) { + switch cond.(type) { + case Cond: + return condToSQL(cond.(Cond)) + case *Builder: + return cond.(*Builder).ToSQL() + } + return "", nil, ErrNotSupportType +} + +// ToBoundSQL convert a builder or conditions to parameters bound SQL +func ToBoundSQL(cond interface{}) (string, error) { + switch cond.(type) { + case Cond: + return condToBoundSQL(cond.(Cond)) + case *Builder: + return cond.(*Builder).ToBoundSQL() + } + return "", ErrNotSupportType +} + +func noSQLQuoteNeeded(a interface{}) bool { + switch a.(type) { + case int, int8, int16, int32, int64: + return true + case uint, uint8, uint16, uint32, uint64: + return true + case float32, float64: + return true + case bool: + return true + case string: + return false + case time.Time, *time.Time: + return false + } + + t := reflect.TypeOf(a) + + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.Bool: + return true + case reflect.String: + return false + } + + return false +} + +// ConvertToBoundSQL will convert SQL and args to a bound SQL +func ConvertToBoundSQL(sql string, args []interface{}) (string, error) { + buf := strings.Builder{} + var i, j, start int + for ; i < len(sql); i++ { + if sql[i] == '?' { + _, err := buf.WriteString(sql[start:i]) + if err != nil { + return "", err + } + start = i + 1 + + if len(args) == j { + return "", ErrNeedMoreArguments + } + + arg := args[j] + if namedArg, ok := arg.(sql2.NamedArg); ok { + arg = namedArg.Value + } + + if noSQLQuoteNeeded(arg) { + _, err = fmt.Fprint(&buf, arg) + } else { + // replace ' -> '' (standard replacement) to avoid critical SQL injection, + // NOTICE: may allow some injection like % (or _) in LIKE query + _, err = fmt.Fprintf(&buf, "'%v'", strings.Replace(fmt.Sprintf("%v", arg), "'", + "''", -1)) + } + if err != nil { + return "", err + } + j = j + 1 + } + } + _, err := buf.WriteString(sql[start:]) + if err != nil { + return "", err + } + return buf.String(), nil +} + +// ConvertPlaceholder replaces the place holder ? to $1, $2 ... or :1, :2 ... 
according prefix +func ConvertPlaceholder(sql, prefix string) (string, error) { + buf := strings.Builder{} + var i, j, start int + var ready = true + for ; i < len(sql); i++ { + if sql[i] == '\'' && i > 0 && sql[i-1] != '\\' { + ready = !ready + } + if ready && sql[i] == '?' { + if _, err := buf.WriteString(sql[start:i]); err != nil { + return "", err + } + + start = i + 1 + j = j + 1 + + if _, err := buf.WriteString(fmt.Sprintf("%v%d", prefix, j)); err != nil { + return "", err + } + } + } + + if _, err := buf.WriteString(sql[start:]); err != nil { + return "", err + } + + return buf.String(), nil +} diff --git a/vendor/xorm.io/builder/writer.go b/vendor/xorm.io/builder/writer.go new file mode 100644 index 0000000..fb4fae5 --- /dev/null +++ b/vendor/xorm.io/builder/writer.go @@ -0,0 +1,42 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package builder + +import ( + "io" + "strings" +) + +// Writer defines the interface +type Writer interface { + io.Writer + Append(...interface{}) +} + +var _ Writer = NewWriter() + +// BytesWriter implments Writer and save SQL in bytes.Buffer +type BytesWriter struct { + *strings.Builder + args []interface{} +} + +// NewWriter creates a new string writer +func NewWriter() *BytesWriter { + w := &BytesWriter{ + Builder: &strings.Builder{}, + } + return w +} + +// Append appends args to Writer +func (w *BytesWriter) Append(args ...interface{}) { + w.args = append(w.args, args...) +} + +// Args returns args +func (w *BytesWriter) Args() []interface{} { + return w.args +} diff --git a/vendor/xorm.io/xorm/.changelog.yml b/vendor/xorm.io/xorm/.changelog.yml new file mode 100644 index 0000000..1303c9c --- /dev/null +++ b/vendor/xorm.io/xorm/.changelog.yml @@ -0,0 +1,53 @@ +# The full repository name +repo: xorm/xorm + +# Service type (gitea or github) +service: gitea + +# Base URL for Gitea instance if using gitea service type (optional) +# Default: https://gitea.com +base-url: + +# Changelog groups and which labeled PRs to add to each group +groups: + - + name: BREAKING + labels: + - kind/breaking + - + name: FEATURES + labels: + - kind/feature + - + name: SECURITY + labels: + - kind/security + - + name: BUGFIXES + labels: + - kind/bug + - + name: ENHANCEMENTS + labels: + - kind/enhancement + - kind/refactor + - kind/ui + - + name: TESTING + labels: + - kind/testing + - + name: BUILD + labels: + - kind/build + - kind/lint + - + name: DOCS + labels: + - kind/docs + - + name: MISC + default: true + +# regex indicating which labels to skip for the changelog +skip-labels: skip-changelog|backport\/.+ diff --git a/vendor/xorm.io/xorm/.drone.yml b/vendor/xorm.io/xorm/.drone.yml new file mode 100644 index 0000000..300c784 --- /dev/null +++ b/vendor/xorm.io/xorm/.drone.yml @@ -0,0 +1,337 @@ +--- +kind: pipeline +name: testing +steps: +- name: test-vet + image: golang:1.11 # The lowest golang requirement + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + commands: + - make vet + - make test + - make fmt-check + when: + event: + - push + - pull_request + +- name: test-sqlite + image: golang:1.12 + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + commands: + - make test-sqlite + - TEST_CACHE_ENABLE=true make test-sqlite + - TEST_QUOTE_POLICY=reserved make test-sqlite + when: + event: + - push + - pull_request + +- name: test-mysql + image: golang:1.12 + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + 
TEST_MYSQL_HOST: mysql + TEST_MYSQL_CHARSET: utf8 + TEST_MYSQL_DBNAME: xorm_test + TEST_MYSQL_USERNAME: root + TEST_MYSQL_PASSWORD: + commands: + - make test-mysql + - TEST_CACHE_ENABLE=true make test-mysql + - TEST_QUOTE_POLICY=reserved make test-mysql + when: + event: + - push + - pull_request + +- name: test-mysql8 + image: golang:1.12 + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + TEST_MYSQL_HOST: mysql8 + TEST_MYSQL_CHARSET: utf8mb4 + TEST_MYSQL_DBNAME: xorm_test + TEST_MYSQL_USERNAME: root + TEST_MYSQL_PASSWORD: + commands: + - make test-mysql + - TEST_CACHE_ENABLE=true make test-mysql + - TEST_QUOTE_POLICY=reserved make test-mysql + when: + event: + - push + - pull_request + +- name: test-mysql-utf8mb4 + image: golang:1.12 + depends_on: + - test-mysql + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + TEST_MYSQL_HOST: mysql + TEST_MYSQL_CHARSET: utf8mb4 + TEST_MYSQL_DBNAME: xorm_test + TEST_MYSQL_USERNAME: root + TEST_MYSQL_PASSWORD: + commands: + - make test-mysql + - TEST_CACHE_ENABLE=true make test-mysql + - TEST_QUOTE_POLICY=reserved make test-mysql + when: + event: + - push + - pull_request + +- name: test-mymysql + pull: default + image: golang:1.12 + depends_on: + - test-mysql-utf8mb4 + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + TEST_MYSQL_HOST: mysql:3306 + TEST_MYSQL_DBNAME: xorm_test + TEST_MYSQL_USERNAME: root + TEST_MYSQL_PASSWORD: + commands: + - make test-mymysql + - TEST_CACHE_ENABLE=true make test-mymysql + - TEST_QUOTE_POLICY=reserved make test-mymysql + when: + event: + - push + - pull_request + +- name: test-mariadb + image: golang:1.12 + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + TEST_MYSQL_HOST: mariadb + TEST_MYSQL_CHARSET: utf8mb4 + TEST_MYSQL_DBNAME: xorm_test + TEST_MYSQL_USERNAME: root + TEST_MYSQL_PASSWORD: + commands: + - make test-mysql + - TEST_CACHE_ENABLE=true make test-mysql + - TEST_QUOTE_POLICY=reserved make test-mysql + when: + event: + - push + - pull_request + +- name: test-postgres + pull: default + image: golang:1.12 + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + TEST_PGSQL_HOST: pgsql + TEST_PGSQL_DBNAME: xorm_test + TEST_PGSQL_USERNAME: postgres + TEST_PGSQL_PASSWORD: postgres + commands: + - make test-postgres + - TEST_CACHE_ENABLE=true make test-postgres + - TEST_QUOTE_POLICY=reserved make test-postgres + when: + event: + - push + - pull_request + +- name: test-postgres-schema + pull: default + image: golang:1.12 + depends_on: + - test-postgres + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + TEST_PGSQL_HOST: pgsql + TEST_PGSQL_SCHEMA: xorm + TEST_PGSQL_DBNAME: xorm_test + TEST_PGSQL_USERNAME: postgres + TEST_PGSQL_PASSWORD: postgres + commands: + - make test-postgres + - TEST_CACHE_ENABLE=true make test-postgres + - TEST_QUOTE_POLICY=reserved make test-postgres + when: + event: + - push + - pull_request + +- name: test-mssql + pull: default + image: golang:1.12 + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + TEST_MSSQL_HOST: mssql + TEST_MSSQL_DBNAME: xorm_test + TEST_MSSQL_USERNAME: sa + TEST_MSSQL_PASSWORD: "yourStrong(!)Password" + commands: + - make test-mssql + - TEST_CACHE_ENABLE=true make test-mssql + - TEST_QUOTE_POLICY=reserved make test-mssql + - TEST_MSSQL_DEFAULT_VARCHAR=NVARCHAR TEST_MSSQL_DEFAULT_CHAR=NCHAR make test-mssql + when: + event: + - push + - pull_request + +- name: test-tidb + pull: default + image: golang:1.12 + environment: + GO111MODULE: "on" + GOPROXY: 
"https://goproxy.cn" + TEST_TIDB_HOST: "tidb:4000" + TEST_TIDB_DBNAME: xorm_test + TEST_TIDB_USERNAME: root + TEST_TIDB_PASSWORD: + commands: + - make test-tidb + - TEST_CACHE_ENABLE=true make test-tidb + - TEST_QUOTE_POLICY=reserved make test-tidb + when: + event: + - push + - pull_request + +- name: test-cockroach + pull: default + image: golang:1.13 + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + TEST_COCKROACH_HOST: "cockroach:26257" + TEST_COCKROACH_DBNAME: xorm_test + TEST_COCKROACH_USERNAME: root + TEST_COCKROACH_PASSWORD: + commands: + - sleep 10 + - make test-cockroach + - TEST_CACHE_ENABLE=true make test-cockroach + when: + event: + - push + - pull_request + +- name: merge_coverage + pull: default + image: golang:1.12 + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + depends_on: + - test-vet + - test-sqlite + - test-mysql + - test-mysql8 + - test-mymysql + - test-postgres + - test-postgres-schema + - test-mssql + - test-tidb + - test-cockroach + commands: + - make coverage + when: + event: + - push + - pull_request + +services: + +- name: mysql + pull: default + image: mysql:5.7 + environment: + MYSQL_ALLOW_EMPTY_PASSWORD: yes + MYSQL_DATABASE: xorm_test + when: + event: + - push + - tag + - pull_request + +- name: mysql8 + pull: default + image: mysql:8.0 + environment: + MYSQL_ALLOW_EMPTY_PASSWORD: yes + MYSQL_DATABASE: xorm_test + when: + event: + - push + - tag + - pull_request + +- name: mariadb + pull: default + image: mariadb:10.4 + environment: + MYSQL_ALLOW_EMPTY_PASSWORD: yes + MYSQL_DATABASE: xorm_test + when: + event: + - push + - tag + - pull_request + +- name: pgsql + pull: default + image: postgres:9.5 + environment: + POSTGRES_DB: xorm_test + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + when: + event: + - push + - tag + - pull_request + +- name: mssql + pull: default + image: microsoft/mssql-server-linux:latest + environment: + ACCEPT_EULA: Y + SA_PASSWORD: yourStrong(!)Password + MSSQL_PID: Developer + when: + event: + - push + - tag + - pull_request + +- name: tidb + pull: default + image: pingcap/tidb:v3.0.3 + when: + event: + - push + - tag + - pull_request + +- name: cockroach + pull: default + image: cockroachdb/cockroach:v19.2.4 + commands: + - /cockroach/cockroach start --insecure + when: + event: + - push + - tag + - pull_request diff --git a/vendor/github.com/jmoiron/sqlx/.gitignore b/vendor/xorm.io/xorm/.gitignore similarity index 65% rename from vendor/github.com/jmoiron/sqlx/.gitignore rename to vendor/xorm.io/xorm/.gitignore index 529841c..617d5da 100644 --- a/vendor/github.com/jmoiron/sqlx/.gitignore +++ b/vendor/xorm.io/xorm/.gitignore @@ -2,10 +2,12 @@ *.o *.a *.so +*.db # Folders _obj _test +vendor/ # Architecture specific extensions/prefixes *.[568vq] @@ -20,5 +22,17 @@ _cgo_export.* _testmain.go *.exe -tags -environ + +*.log +.vendor +temp_test.go +.vscode +xorm.test +*.sqlite3 +test.db.sql + +.idea/ + +*coverage.out +test.db +integrations/*.sql diff --git a/vendor/xorm.io/xorm/.revive.toml b/vendor/xorm.io/xorm/.revive.toml new file mode 100644 index 0000000..64e223b --- /dev/null +++ b/vendor/xorm.io/xorm/.revive.toml @@ -0,0 +1,25 @@ +ignoreGeneratedHeader = false +severity = "warning" +confidence = 0.8 +errorCode = 1 +warningCode = 1 + +[rule.blank-imports] +[rule.context-as-argument] +[rule.context-keys-type] +[rule.dot-imports] +[rule.error-return] +[rule.error-strings] +[rule.error-naming] +[rule.exported] +[rule.if-return] +[rule.increment-decrement] +[rule.var-naming] 
+[rule.var-declaration]
+[rule.package-comments]
+[rule.range]
+[rule.receiver-naming]
+[rule.time-naming]
+[rule.unexported-return]
+[rule.indent-error-flow]
+[rule.errorf]
\ No newline at end of file
diff --git a/vendor/xorm.io/xorm/CHANGELOG.md b/vendor/xorm.io/xorm/CHANGELOG.md
new file mode 100644
index 0000000..fa0259b
--- /dev/null
+++ b/vendor/xorm.io/xorm/CHANGELOG.md
@@ -0,0 +1,205 @@
+# Changelog
+
+This changelog goes through all the changes that have been made in each release; entries are taken from our git log without substantial rewording.
+
+## [1.0.3](https://gitea.com/xorm/xorm/pulls?q=&type=all&state=closed&milestone=1281) - 2020-07-10
+
+* BUGFIXES
+  * Fix dump of sqlite (#1639)
+* ENHANCEMENTS
+  * Fix index name parsing in SQLite dialect (#1737)
+  * Add hooks for Commit and Rollback (#1733)
+
+## [1.0.2](https://gitea.com/xorm/xorm/pulls?q=&type=all&state=closed&milestone=1261) - 2020-06-16
+
+* FEATURES
+  * Add Hook (#1644)
+* BUGFIXES
+  * Fix bug when ID used but no reference table given (#1709)
+  * Fix find and count bug (#1651)
+* ENHANCEMENTS
+  * chore: improve snakeCasedName performance (#1688)
+  * Fix find with another struct (#1666)
+  * Fix GetColumns missing ordinal position (#1660)
+* MISC
+  * chore: improve titleCasedName performance (#1691)
+
+## [1.0.1](https://gitea.com/xorm/xorm/pulls?q=&type=all&state=closed&milestone=1253) - 2020-03-25
+
+* BUGFIXES
+  * Oracle: Local Naming Method (#1515)
+  * Fix find and count bug (#1618)
+  * Fix duplicated deleted condition on FindAndCount (#1619)
+  * Fix find and count bug with cache (#1622)
+  * Fix postgres schema problem (#1624)
+  * Fix quote with blank (#1626)
+
+## [1.0.0](https://gitea.com/xorm/xorm/pulls?q=&type=all&state=closed&milestone=1242) - 2020-03-22
+
+* BREAKING
+  * Add context for dialects (#1558)
+  * Move zero functions to a standalone package (#1548)
+  * Merge core package back into the main repository and split it into several sub packages (#1543)
+* FEATURES
+  * Use a new ContextLogger interface to implement logger (#1557)
+* BUGFIXES
+  * Fix setschema (#1606)
+  * Fix dump/import bug (#1603)
+  * Fix pk bug (#1602)
+  * Fix master/slave bug (#1601)
+  * Fix bug when dump (#1597)
+  * Ignore schema when dbtype is not postgres (#1593)
+  * Fix table name (#1590)
+  * Fix find alias bug (#1581)
+  * Fix rows bug (#1576)
+  * Fix map with cols (#1575)
+  * Fix bug on deleted with join (#1570)
+  * Improve quote policy (#1567)
+  * Fix break session sql enable feature (#1566)
+  * Fix mssql quote (#1535)
+  * Fix join table name quote bug (#1534)
+  * Fix mssql issue with duplicate columns (#1225)
+  * Fix mysql8.0 sync failed (#808)
+* ENHANCEMENTS
+  * Fix panic when batch inserting an interface slice (#1598)
+  * Move some code to the statement sub package (#1574)
+  * Remove circle file (#1569)
+  * Move statement into a sub package (#1564)
+  * Move maptype to tag parser (#1561)
+  * Move caches to manager (#1553)
+  * Improve code (#1552)
+  * Improve some code (#1551)
+  * Improve statement (#1549)
+  * Move tag parser related code into a standalone sub package (#1547)
+  * Move reserved-words related files into the dialects sub package (#1544)
+  * Fix `Conversion` method `ToDB() ([]byte, error)` returning nil (#1296)
+  * Check driver.Valuer response, and skip the column if nil (#1167)
+  * Add cockroach support and tests (#896)
+* TESTING
+  * Improve tests (#1572)
+* BUILD
+  * Add changelog file and tool configuration (#1546)
+* DOCS
+  * Fix outdated changelog (#1565)
+
+## old changelog
+
+* **v0.6.5**
+  * Postgres schema support
+  * vgo support
+  * Add FindAndCount
+  * Database-specific params support via NewEngineWithParams
+  * Some bugs fixed
+
+* **v0.6.4**
+  * Automatic read/write splitting
+  * Query/QueryString/QueryInterface can be combined with Where/And
+  * Get supports non-struct variables
+  * BufferSize on Iterate
+  * Fix some other bugs
+
+* **v0.6.3**
+  * Merge tests into the main project
+  * Add `Exist` function
+  * Add `SumInt` function
+  * MySQL now supports reading and creating column comments
+  * Fix time-related bugs
+  * Fix some other bugs
+
+* **v0.6.2**
+  * Refactor tag parse methods
+  * Add Scan features to Get
+  * Add QueryString method
+
+* **v0.4.5**
+  * Many bugs fixed
+  * extends supports unlimited depth
+  * Delete Limit support
+
+* **v0.4.4**
+  * ql database experimental support
+  * tidb database experimental support
+  * sql.NullString etc. field support
+  * SELECT FOR UPDATE support
+  * Many bugs fixed
+
+* **v0.4.3**
+  * Json column type support
+  * Oracle experimental support
+  * Bugs fixed
+
+* **v0.4.2**
+  * Transactions now roll back automatically if neither Rollback nor Commit is called
+  * Gonic Mapper support
+  * Bugs fixed
+
+* **v0.4.1**
+  * deleted tag support for soft delete
+  * Bugs fixed
+
+* **v0.4.0 RC1**
+  Changes:
+  * Moved the xorm cmd to [github.com/go-xorm/cmd](github.com/go-xorm/cmd)
+  * Refactored general DB operations into a core lib at [github.com/go-xorm/core](https://github.com/go-xorm/core)
+  * Moved tests to [github.com/go-xorm/tests](github.com/go-xorm/tests)
+
+  Improvements:
+  * Prepared statement cache
+  * Add Incr API
+  * Specify Timezone Location
+
+* **v0.3.2**
+  Improvements:
+  * Add AllCols & MustCols functions
+  * Add TableName for custom table names
+
+  Bug Fixes:
+  * #46
+  * #51
+  * #53
+  * #89
+  * #86
+  * #92
+
+* **v0.3.1**
+
+  Features:
+  * Support MSSQL DB via the ODBC driver ([github.com/lunny/godbc](https://github.com/lunny/godbc))
+  * Composite keys, using multiple pk xorm tags
+  * Added Rows() API as an alternative to the Iterate() API for traversing result sets, with usage similar to the sql.Rows type
+  * ORM structs may declare pointers to builtin types as members to allow null DB fields
+  * Before and After event processors
+
+  Improvements:
+  * Allowed int/int32/int64/uint/uint32/uint64/string as the primary key type
+  * Performance improvements for Get()/Find()/Iterate()
+
+
+* **v0.2.3** : Improved documents; optimistic locking support; timestamp with time zone support; Mapper changed to tableMapper and columnMapper, and added PrefixMapper & SuffixMapper to support custom table or column name prefixes and suffixes; Insert now returns affected, err instead of id, err; added UseBool & Distinct.
+
+* **v0.2.2** : Postgres driver now supports lib/pq; added Iterate method for record-by-record handling; added SetMaxConns (go1.2+) support; some bugs fixed.
+
+* **v0.2.1** : Added database reverse tool, which can now generate Go & C++ code, see [Xorm Tool README](https://github.com/go-xorm/xorm/blob/master/xorm/README.md); some bugs fixed.
+
+* **v0.2.0** : Added cache support, speeding selects up 3~5x; added SameMapper for identical struct and table names; added Sync method to automatically add tables, columns and indexes.
+
+* **v0.1.9** : Added postgres and mymysql support; added ` and ? support in raw SQL, even for postgres; added Cols, StoreEngine and Charset functions; added support for many column data types, please see [Mapping Rules](#mapping).
+
+* **v0.1.8** : Added union index and union unique support, please see [Mapping Rules](#mapping).
+
+* **v0.1.7** : Added the IConnectPool interface and three implementations: NoneConnectPool, SysConnectPool and SimpleConnectPool. You can choose one of them; the default is SysConnectPool. You can also customize your own connection pool. struct Engine gained a Close method, which should be invoked before system exit.
+
+* **v0.1.6** : Added conversion interface support; added struct derive support; added single mapping support.
+
+* **v0.1.5** : Added multi-thread support; added Sql() function for struct queries; Get function changed to return interface; MakeSession and Create replaced by NewSession and NewEngine.
+
+* **v0.1.4** : Added simple cascade load support; added support for more data types.
+
+* **v0.1.3** : Find function now supports both slices and maps; added Table function for multi-table and temporary-table support.
+
+* **v0.1.2** : Insert function now supports both struct and slice pointer parameters, batch inserting and auto transactions.
+
+* **v0.1.1** : Added Id and In functions and improved README.
+
+* **v0.1.0** : Initial release.
\ No newline at end of file
diff --git a/vendor/xorm.io/xorm/CONTRIBUTING.md b/vendor/xorm.io/xorm/CONTRIBUTING.md
new file mode 100644
index 0000000..a6925a5
--- /dev/null
+++ b/vendor/xorm.io/xorm/CONTRIBUTING.md
@@ -0,0 +1,87 @@
+## Contributing to xorm
+
+`xorm` has a backlog of [pull requests](https://help.github.com/articles/using-pull-requests), but contributions are still very
+much welcome. You can help with patch review, submitting bug reports,
+or adding new functionality. There is no formal style guide, but
+please conform to the style of existing code and general Go formatting
+conventions when submitting patches.
+
+* [fork a repo](https://help.github.com/articles/fork-a-repo)
+* [creating a pull request](https://help.github.com/articles/creating-a-pull-request)
+
+### Language
+
+Since `xorm` is a worldwide open source project, please describe your issues and code changes in English whenever possible.
+
+### Sign your code with comments
+```
+// !! your comments
+
+e.g.,
+
+// !lunny! this is a comment made by lunny
+```
+
+### Build xorm and test it locally
+
+Once you have written some code on your feature branch, you can build and test it locally. Just run
+
+```
+make build
+```
+and
+```
+make test
+```
+
+`make test` is an alias for `make test-sqlite`; it runs the tests against a SQLite database file. Nothing else is needed except a working cgo build environment.
+
+If you write a new test method, you can run
+
+```
+make test-sqlite#TestMyNewMethod
+```
+
+which will run only that specific test method.
+
+If you want to test against another database, you first have to prepare a running database, and then run
+
+```
+TEST_MYSQL_HOST= TEST_MYSQL_CHARSET= TEST_MYSQL_DBNAME= TEST_MYSQL_USERNAME= TEST_MYSQL_PASSWORD= make test-mysql
+```
+
+or, for the other databases:
+```
+TEST_MSSQL_HOST= TEST_MSSQL_DBNAME= TEST_MSSQL_USERNAME= TEST_MSSQL_PASSWORD= make test-mssql
+```
+```
+TEST_PGSQL_HOST= TEST_PGSQL_SCHEMA= TEST_PGSQL_DBNAME= TEST_PGSQL_USERNAME= TEST_PGSQL_PASSWORD= make test-postgres
+```
+```
+TEST_TIDB_HOST= TEST_TIDB_DBNAME= TEST_TIDB_USERNAME= TEST_TIDB_PASSWORD= make test-tidb
+```
+
+And if your branch touches the cache, you can also enable it via `TEST_CACHE_ENABLE=true`.
+
+### Patch review
+
+Help review existing open [pull requests](https://help.github.com/articles/using-pull-requests) by commenting on the code or
+proposed functionality.
+
+### Bug reports
+
+We appreciate any bug reports, but especially ones with self-contained
+(doesn't depend on code outside of xorm), minimal (can't be simplified
+further) test cases. It's especially helpful if you can submit a pull
+request with just the failing test case (you can find example test files such as [session_get_test.go](https://gitea.com/xorm/xorm/src/branch/master/session_get_test.go)); a rough sketch of such a test is given at the end of this file.
+
+If you implement a new database interface, you may need to add a test_<database>.sh file.
+For example, [test_mysql.sh](https://gitea.com/xorm/xorm/src/branch/master/test_mysql.sh)
+
+### New functionality
+
+There are a number of pending patches for new functionality, so
+additional feature patches will take a while to merge. Still, patches
+are generally reviewed based on usefulness and complexity in addition
+to time-in-queue, so if you have a knockout idea, take a shot. Feel
+free to open an issue discussing your proposed patch beforehand.
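+
+As a rough illustration only, a minimal failing-test sketch could look like the following. It assumes the integration test harness has already initialized `testEngine` (as in the existing `session_get_test.go`) and uses the `testify/assert` package the existing tests rely on; the `BugReportBean` type and its values are invented for this sketch:
+
+```Go
+package integrations
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+// BugReportBean is a hypothetical minimal bean reproducing a reported bug.
+type BugReportBean struct {
+	Id   int64
+	Name string `xorm:"varchar(64)"`
+}
+
+func TestBugReportGet(t *testing.T) {
+	// create the table for the bean, then insert one row
+	assert.NoError(t, testEngine.Sync2(new(BugReportBean)))
+
+	_, err := testEngine.Insert(&BugReportBean{Name: "xorm"})
+	assert.NoError(t, err)
+
+	// read it back; this assertion should expose the reported misbehavior
+	var bean BugReportBean
+	has, err := testEngine.Get(&bean)
+	assert.NoError(t, err)
+	assert.True(t, has)
+}
+```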
diff --git a/vendor/xorm.io/xorm/LICENSE b/vendor/xorm.io/xorm/LICENSE new file mode 100644 index 0000000..84d2ae5 --- /dev/null +++ b/vendor/xorm.io/xorm/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 - 2015 The Xorm Authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the {organization} nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/xorm.io/xorm/Makefile b/vendor/xorm.io/xorm/Makefile new file mode 100644 index 0000000..ed87388 --- /dev/null +++ b/vendor/xorm.io/xorm/Makefile @@ -0,0 +1,224 @@ +IMPORT := xorm.io/xorm +export GO111MODULE=on + +GO ?= go +GOFMT ?= gofmt -s +TAGS ?= +SED_INPLACE := sed -i + +GOFILES := $(shell find . -name "*.go" -type f) +INTEGRATION_PACKAGES := xorm.io/xorm/integrations +PACKAGES ?= $(filter-out $(INTEGRATION_PACKAGES),$(shell $(GO) list ./...)) + +TEST_COCKROACH_HOST ?= cockroach:26257 +TEST_COCKROACH_SCHEMA ?= +TEST_COCKROACH_DBNAME ?= xorm_test +TEST_COCKROACH_USERNAME ?= postgres +TEST_COCKROACH_PASSWORD ?= + +TEST_MSSQL_HOST ?= mssql:1433 +TEST_MSSQL_DBNAME ?= gitea +TEST_MSSQL_USERNAME ?= sa +TEST_MSSQL_PASSWORD ?= MwantsaSecurePassword1 +TEST_MSSQL_DEFAULT_VARCHAR ?= varchar +TEST_MSSQL_DEFAULT_CHAR ?= char + +TEST_MYSQL_HOST ?= mysql:3306 +TEST_MYSQL_CHARSET ?= utf8 +TEST_MYSQL_DBNAME ?= xorm_test +TEST_MYSQL_USERNAME ?= root +TEST_MYSQL_PASSWORD ?= + +TEST_PGSQL_HOST ?= pgsql:5432 +TEST_PGSQL_SCHEMA ?= +TEST_PGSQL_DBNAME ?= xorm_test +TEST_PGSQL_USERNAME ?= postgres +TEST_PGSQL_PASSWORD ?= mysecretpassword + +TEST_TIDB_HOST ?= tidb:4000 +TEST_TIDB_DBNAME ?= xorm_test +TEST_TIDB_USERNAME ?= root +TEST_TIDB_PASSWORD ?= + +TEST_CACHE_ENABLE ?= false +TEST_QUOTE_POLICY ?= always + +.PHONY: all +all: build + +.PHONY: build +build: go-check $(GO_SOURCES) + $(GO) build $(PACKAGES) + +.PHONY: clean +clean: + $(GO) clean -i ./... + rm -rf *.sql *.log test.db *coverage.out coverage.all integrations/*.sql + +.PHONY: coverage +coverage: + @hash gocovmerge > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + $(GO) get -u github.com/wadey/gocovmerge; \ + fi + gocovmerge $(shell find . 
-type f -name "coverage.out") > coverage.all;\
+
+.PHONY: fmt
+fmt:
+	$(GOFMT) -w $(GOFILES)
+
+.PHONY: fmt-check
+fmt-check:
+	# get all go files and run gofmt on them
+	@diff=$$($(GOFMT) -d $(GOFILES)); \
+	if [ -n "$$diff" ]; then \
+		echo "Please run 'make fmt' and commit the result:"; \
+		echo "$${diff}"; \
+		exit 1; \
+	fi;
+
+.PHONY: go-check
+go-check:
+	$(eval GO_VERSION := $(shell printf "%03d%03d%03d" $(shell go version | grep -Eo '[0-9]+\.?[0-9]+?\.?[0-9]?\s' | tr '.' ' ');))
+	@if [ "$(GO_VERSION)" -lt "001011000" ]; then \
+		echo "xorm requires Go 1.11.0 or greater to build. You can get it at https://golang.org/dl/"; \
+		exit 1; \
+	fi
+
+.PHONY: help
+help:
+	@echo "Make Routines:"
+	@echo " -                  equivalent to \"build\""
+	@echo " - build            builds the entire project"
+	@echo " - clean            deletes integration files and build files"
+	@echo " - fmt              formats the code"
+	@echo " - lint             runs the code linter revive"
+	@echo " - misspell         checks for misspelled words"
+	@echo " - test             runs the default unit tests"
+	@echo " - test-cockroach   runs integration tests for cockroach"
+	@echo " - test-mysql       runs integration tests for mysql"
+	@echo " - test-mssql       runs integration tests for mssql"
+	@echo " - test-postgres    runs integration tests for postgres"
+	@echo " - test-sqlite      runs integration tests for sqlite"
+	@echo " - test-tidb        runs integration tests for tidb"
+	@echo " - vet              examines Go source code and reports suspicious constructs"
+
+.PHONY: lint
+lint: revive
+
+.PHONY: revive
+revive:
+	@hash revive > /dev/null 2>&1; if [ $$? -ne 0 ]; then \
+		$(GO) get -u github.com/mgechev/revive; \
+	fi
+	revive -config .revive.toml -exclude=./vendor/... ./... || exit 1
+
+.PHONY: misspell
+misspell:
+	@hash misspell > /dev/null 2>&1; if [ $$? -ne 0 ]; then \
+		$(GO) get -u github.com/client9/misspell/cmd/misspell; \
+	fi
+	misspell -w -i unknwon $(GOFILES)
+
+.PHONY: misspell-check
+misspell-check:
+	@hash misspell > /dev/null 2>&1; if [ $$? -ne 0 ]; then \
+		$(GO) get -u github.com/client9/misspell/cmd/misspell; \
+	fi
+	misspell -error -i unknwon,destory $(GOFILES)
+
+.PHONY: test
+test: go-check
+	$(GO) test $(PACKAGES)
+
+.PHONY: test-cockroach
+test-cockroach: go-check
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=postgres -schema='$(TEST_COCKROACH_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \
+	-conn_str="postgres://$(TEST_COCKROACH_USERNAME):$(TEST_COCKROACH_PASSWORD)@$(TEST_COCKROACH_HOST)/$(TEST_COCKROACH_DBNAME)?sslmode=disable&experimental_serial_normalization=sql_sequence" \
+	-ignore_update_limit=true -coverprofile=cockroach.$(TEST_COCKROACH_SCHEMA).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
+
+.PHONY: test-cockroach\#%
+test-cockroach\#%: go-check
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=postgres -schema='$(TEST_COCKROACH_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \
+	-conn_str="postgres://$(TEST_COCKROACH_USERNAME):$(TEST_COCKROACH_PASSWORD)@$(TEST_COCKROACH_HOST)/$(TEST_COCKROACH_DBNAME)?sslmode=disable&experimental_serial_normalization=sql_sequence" \
+	-ignore_update_limit=true -coverprofile=cockroach.$(TEST_COCKROACH_SCHEMA).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
+
+.PHONY: test-mssql
+test-mssql: go-check
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mssql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \
+	-conn_str="server=$(TEST_MSSQL_HOST);user id=$(TEST_MSSQL_USERNAME);password=$(TEST_MSSQL_PASSWORD);database=$(TEST_MSSQL_DBNAME)" \
+	-default_varchar=$(TEST_MSSQL_DEFAULT_VARCHAR) -default_char=$(TEST_MSSQL_DEFAULT_CHAR) \
+	-coverprofile=mssql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
+
+.PHONY: test-mssql\#%
+test-mssql\#%: go-check
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=mssql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \
+	-conn_str="server=$(TEST_MSSQL_HOST);user id=$(TEST_MSSQL_USERNAME);password=$(TEST_MSSQL_PASSWORD);database=$(TEST_MSSQL_DBNAME)" \
+	-default_varchar=$(TEST_MSSQL_DEFAULT_VARCHAR) -default_char=$(TEST_MSSQL_DEFAULT_CHAR) \
+	-coverprofile=mssql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
+
+.PHONY: test-mymysql
+test-mymysql: go-check
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mymysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \
+	-conn_str="tcp:$(TEST_MYSQL_HOST)*$(TEST_MYSQL_DBNAME)/$(TEST_MYSQL_USERNAME)/$(TEST_MYSQL_PASSWORD)" \
+	-coverprofile=mymysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
+
+.PHONY: test-mymysql\#%
+test-mymysql\#%: go-check
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=mymysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \
+	-conn_str="tcp:$(TEST_MYSQL_HOST)*$(TEST_MYSQL_DBNAME)/$(TEST_MYSQL_USERNAME)/$(TEST_MYSQL_PASSWORD)" \
+	-coverprofile=mymysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
+
+.PHONY: test-mysql
+test-mysql: go-check
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \
+	-conn_str="$(TEST_MYSQL_USERNAME):$(TEST_MYSQL_PASSWORD)@tcp($(TEST_MYSQL_HOST))/$(TEST_MYSQL_DBNAME)?charset=$(TEST_MYSQL_CHARSET)" \
+	-coverprofile=mysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
+
+.PHONY: test-mysql\#%
+test-mysql\#%: go-check
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=mysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \
+	-conn_str="$(TEST_MYSQL_USERNAME):$(TEST_MYSQL_PASSWORD)@tcp($(TEST_MYSQL_HOST))/$(TEST_MYSQL_DBNAME)?charset=$(TEST_MYSQL_CHARSET)" \
+	-coverprofile=mysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
+
+.PHONY: test-postgres
+test-postgres: go-check
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=postgres -schema='$(TEST_PGSQL_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \
+	-conn_str="postgres://$(TEST_PGSQL_USERNAME):$(TEST_PGSQL_PASSWORD)@$(TEST_PGSQL_HOST)/$(TEST_PGSQL_DBNAME)?sslmode=disable" \
+	-quote=$(TEST_QUOTE_POLICY) -coverprofile=postgres.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
+
+.PHONY: test-postgres\#%
+test-postgres\#%: go-check
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=postgres -schema='$(TEST_PGSQL_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \
+	-conn_str="postgres://$(TEST_PGSQL_USERNAME):$(TEST_PGSQL_PASSWORD)@$(TEST_PGSQL_HOST)/$(TEST_PGSQL_DBNAME)?sslmode=disable" \
+	-quote=$(TEST_QUOTE_POLICY) -coverprofile=postgres.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
+
+.PHONY: test-sqlite
+test-sqlite: go-check
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -cache=$(TEST_CACHE_ENABLE) -db=sqlite3 -conn_str="./test.db?cache=shared&mode=rwc" \
+	-quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
+
+.PHONY: test-sqlite-schema
+test-sqlite-schema: go-check
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -schema=xorm -cache=$(TEST_CACHE_ENABLE) -db=sqlite3 -conn_str="./test.db?cache=shared&mode=rwc" \
+	-quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
+
+.PHONY: test-sqlite\#%
+test-sqlite\#%: go-check
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -cache=$(TEST_CACHE_ENABLE) -db=sqlite3 -conn_str="./test.db?cache=shared&mode=rwc" \
+	-quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
+
+.PHONY: test-tidb
+test-tidb: go-check
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mysql -cache=$(TEST_CACHE_ENABLE) -ignore_select_update=true \
+	-conn_str="$(TEST_TIDB_USERNAME):$(TEST_TIDB_PASSWORD)@tcp($(TEST_TIDB_HOST))/$(TEST_TIDB_DBNAME)" \
+	-quote=$(TEST_QUOTE_POLICY) -coverprofile=tidb.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
+
+.PHONY: test-tidb\#%
+test-tidb\#%: go-check
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=mysql -cache=$(TEST_CACHE_ENABLE) -ignore_select_update=true \
+	-conn_str="$(TEST_TIDB_USERNAME):$(TEST_TIDB_PASSWORD)@tcp($(TEST_TIDB_HOST))/$(TEST_TIDB_DBNAME)" \
+	-quote=$(TEST_QUOTE_POLICY) -coverprofile=tidb.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
+
+.PHONY: vet
+vet:
+	$(GO) vet $(shell $(GO) list ./...)
\ No newline at end of file
diff --git a/vendor/xorm.io/xorm/README.md b/vendor/xorm.io/xorm/README.md
new file mode 100644
index 0000000..6738083
--- /dev/null
+++ b/vendor/xorm.io/xorm/README.md
@@ -0,0 +1,482 @@
+# xorm
+
+[中文](https://gitea.com/xorm/xorm/src/branch/master/README_CN.md)
+
+Xorm is a simple and powerful ORM for Go.
+
+[![Build Status](https://drone.gitea.com/api/badges/xorm/xorm/status.svg)](https://drone.gitea.com/xorm/xorm) [![](http://gocover.io/_badge/xorm.io/xorm)](https://gocover.io/xorm.io/xorm) [![](https://goreportcard.com/badge/xorm.io/xorm)](https://goreportcard.com/report/xorm.io/xorm) [![Join the chat at https://img.shields.io/discord/323460943201959939.svg](https://img.shields.io/discord/323460943201959939.svg)](https://discord.gg/HuR2CF3)
+
+## Notice
+
+v1.0.0 introduces some breaking changes from v0.8.2:
+
+- Removed some non-Go-idiomatic function names such as `Id` and `Sql`; please use `ID` and `SQL` instead.
+- Removed the dependency on `xorm.io/core` and moved the code to `xorm.io/xorm/core`, `xorm.io/xorm/names`, `xorm.io/xorm/schemas` and others.
+- Renamed some interfaces, e.g. `core.IMapper` -> `names.Mapper`, `core.ILogger` -> `log.Logger`.
+
+## Features
+
+* Struct <-> table mapping support
+* Chainable APIs
+* Transaction support
+* Both ORM and raw SQL operation support
+* Database schema sync support
+* Query cache speed-up
+* Database reverse support via [xorm.io/reverse](https://xorm.io/reverse)
+* Simple cascade loading support
+* Optimistic locking support
+* SQL builder support via [xorm.io/builder](https://xorm.io/builder)
+* Automatic read/write splitting
+* Postgres schema support
+* Context cache support
+* Context-aware logging / SQL log support
+
+## Drivers Support
+
+The following databases are currently supported through these `database/sql` drivers:
+
+* [Mysql5.*](https://github.com/mysql/mysql-server/tree/5.7) / [Mysql8.*](https://github.com/mysql/mysql-server) / [Mariadb](https://github.com/MariaDB/server) / [Tidb](https://github.com/pingcap/tidb)
+  - [github.com/go-sql-driver/mysql](https://github.com/go-sql-driver/mysql)
+  - [github.com/ziutek/mymysql/godrv](https://github.com/ziutek/mymysql/godrv)
+
+* [Postgres](https://github.com/postgres/postgres) / [Cockroach](https://github.com/cockroachdb/cockroach)
+  - [github.com/lib/pq](https://github.com/lib/pq)
+
+* [SQLite](https://sqlite.org)
+  - [github.com/mattn/go-sqlite3](https://github.com/mattn/go-sqlite3)
+
+* MsSql
+  - [github.com/denisenkom/go-mssqldb](https://github.com/denisenkom/go-mssqldb)
+
+* Oracle
+  - [github.com/mattn/go-oci8](https://github.com/mattn/go-oci8) (experimental)
+
+## Installation
+
+    go get xorm.io/xorm
+
+## Documents
+
+* [Manual](http://xorm.io/docs)
+
+* [GoDoc](http://pkg.go.dev/xorm.io/xorm)
+
+## Quick Start
+
+* Create an engine
+
+First, create an engine for your database.
+
+```Go
+engine, err := xorm.NewEngine(driverName, dataSourceName)
+```
+
+* Define a struct and use Sync2 to sync it to the database
+
+```Go
+type User struct {
+    Id int64
+    Name string
+    Salt string
+    Age int
+    Passwd string `xorm:"varchar(200)"`
+    Created time.Time `xorm:"created"`
+    Updated time.Time `xorm:"updated"`
+}
+
+err := engine.Sync2(new(User))
+```
+
+* Create an engine group
+
+```Go
+dataSourceNameSlice := []string{masterDataSourceName, slave1DataSourceName, slave2DataSourceName}
+engineGroup, err := xorm.NewEngineGroup(driverName, dataSourceNameSlice)
+```
+
+```Go
+masterEngine, err := xorm.NewEngine(driverName, masterDataSourceName)
+slave1Engine, err := xorm.NewEngine(driverName, slave1DataSourceName)
+slave2Engine, err := xorm.NewEngine(driverName, slave2DataSourceName)
+engineGroup, err := xorm.NewEngineGroup(masterEngine, []*Engine{slave1Engine, slave2Engine})
+```
+
+Then, anywhere you would use `engine`, you can simply use `engineGroup` instead, as sketched below.
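+
+As a rough, non-authoritative sketch of that substitution (relying on the automatic read/write splitting listed under Features; the `Master()` accessor and the `User` struct from the Quick Start are used here for illustration):
+
+```Go
+// Reads are distributed across the slave engines, writes go to the master.
+var users []User
+err := engineGroup.Where("age > ?", 10).Find(&users) // read: served by a slave
+
+affected, err := engineGroup.Insert(&User{Name: "xiaoxiao"}) // write: served by the master
+
+// For explicit control, the group also exposes the underlying engines.
+total, err := engineGroup.Master().Count(new(User)) // force the master
+```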
+
+* `Query` runs a SQL string and returns results as `[]map[string][]byte`; `QueryString` returns `[]map[string]string`, and `QueryInterface` returns `[]map[string]interface{}`.
+
+```Go
+results, err := engine.Query("select * from user")
+results, err := engine.Where("a = 1").Query()
+
+results, err := engine.QueryString("select * from user")
+results, err := engine.Where("a = 1").QueryString()
+
+results, err := engine.QueryInterface("select * from user")
+results, err := engine.Where("a = 1").QueryInterface()
+```
+
+* `Exec` runs a SQL string; it returns `affected` and `error`.
+
+```Go
+affected, err := engine.Exec("update user set age = ? where name = ?", age, name)
+```
+
+* `Insert` inserts one or multiple records into the database.
+
+```Go
+affected, err := engine.Insert(&user)
+// INSERT INTO struct () values ()
+
+affected, err := engine.Insert(&user1, &user2)
+// INSERT INTO struct1 () values ()
+// INSERT INTO struct2 () values ()
+
+affected, err := engine.Insert(&users)
+// INSERT INTO struct () values (),(),()
+
+affected, err := engine.Insert(&user1, &users)
+// INSERT INTO struct1 () values ()
+// INSERT INTO struct2 () values (),(),()
+```
+
+* `Get` queries a single record from the database.
+
+```Go
+has, err := engine.Get(&user)
+// SELECT * FROM user LIMIT 1
+
+has, err := engine.Where("name = ?", name).Desc("id").Get(&user)
+// SELECT * FROM user WHERE name = ? ORDER BY id DESC LIMIT 1
+
+var name string
+has, err := engine.Table(&user).Where("id = ?", id).Cols("name").Get(&name)
+// SELECT name FROM user WHERE id = ?
+
+var id int64
+has, err := engine.Table(&user).Where("name = ?", name).Cols("id").Get(&id)
+has, err := engine.SQL("select id from user").Get(&id)
+// SELECT id FROM user WHERE name = ?
+
+var valuesMap = make(map[string]string)
+has, err := engine.Table(&user).Where("id = ?", id).Get(&valuesMap)
+// SELECT * FROM user WHERE id = ?
+
+var valuesSlice = make([]interface{}, len(cols))
+has, err := engine.Table(&user).Where("id = ?", id).Cols(cols...).Get(&valuesSlice)
+// SELECT col1, col2, col3 FROM user WHERE id = ?
+```
+
+* `Exist` checks whether a record exists in the table.
+
+```Go
+has, err := testEngine.Exist(new(RecordExist))
+// SELECT * FROM record_exist LIMIT 1
+
+has, err = testEngine.Exist(&RecordExist{
+	Name: "test1",
+})
+// SELECT * FROM record_exist WHERE name = ? LIMIT 1
+
+has, err = testEngine.Where("name = ?", "test1").Exist(&RecordExist{})
+// SELECT * FROM record_exist WHERE name = ? LIMIT 1
+
+has, err = testEngine.SQL("select * from record_exist where name = ?", "test1").Exist()
+// select * from record_exist where name = ?
+
+has, err = testEngine.Table("record_exist").Exist()
+// SELECT * FROM record_exist LIMIT 1
+
+has, err = testEngine.Table("record_exist").Where("name = ?", "test1").Exist()
+// SELECT * FROM record_exist WHERE name = ? LIMIT 1
+```
+
+* `Find` queries multiple records from the database; you can also use joins and `extends`.
+
+```Go
+var users []User
+err := engine.Where("name = ?", name).And("age > 10").Limit(10, 0).Find(&users)
+// SELECT * FROM user WHERE name = ? AND age > 10 limit 10 offset 0
+
+type Detail struct {
+    Id int64
+    UserId int64 `xorm:"index"`
+}
+
+type UserDetail struct {
+    User `xorm:"extends"`
+    Detail `xorm:"extends"`
+}
+
+var users []UserDetail
+err := engine.Table("user").Select("user.*, detail.*").
+    Join("INNER", "detail", "detail.user_id = user.id").
+    Where("user.name = ?", name).Limit(10, 0).
+    Find(&users)
+// SELECT user.*, detail.* FROM user INNER JOIN detail WHERE user.name = ? limit 10 offset 0
+```
+
+* `Iterate` and `Rows` query multiple records and handle them record by record; there are two methods, Iterate and Rows.
+
+```Go
+err := engine.Iterate(&User{Name:name}, func(idx int, bean interface{}) error {
+    user := bean.(*User)
+    return nil
+})
+// SELECT * FROM user
+
+err := engine.BufferSize(100).Iterate(&User{Name:name}, func(idx int, bean interface{}) error {
+    user := bean.(*User)
+    return nil
+})
+// SELECT * FROM user Limit 0, 100
+// SELECT * FROM user Limit 101, 100
+
+rows, err := engine.Rows(&User{Name:name})
+// SELECT * FROM user
+defer rows.Close()
+bean := new(Struct)
+for rows.Next() {
+    err = rows.Scan(bean)
+}
+```
+
+* `Update` updates one or more records. By default it updates only non-empty and non-zero fields, unless you use `Cols`, `AllCols` and so on.
+
+```Go
+affected, err := engine.ID(1).Update(&user)
+// UPDATE user SET ... Where id = ?
+
+affected, err := engine.Update(&user, &User{Name:name})
+// UPDATE user SET ... Where name = ?
+
+var ids = []int64{1, 2, 3}
+affected, err := engine.In("id", ids).Update(&user)
+// UPDATE user SET ... Where id IN (?, ?, ?)
+
+// force update indicated columns by Cols
+affected, err := engine.ID(1).Cols("age").Update(&User{Name:name, Age: 12})
+// UPDATE user SET age = ?, updated=? Where id = ?
+
+// force NOT update indicated columns by Omit
+affected, err := engine.ID(1).Omit("name").Update(&User{Name:name, Age: 12})
+// UPDATE user SET age = ?, updated=? Where id = ?
+
+affected, err := engine.ID(1).AllCols().Update(&user)
+// UPDATE user SET name=?,age=?,salt=?,passwd=?,updated=? Where id = ?
+```
+
+* `Delete` deletes one or more records. `Delete` MUST be given a condition.
+
+```Go
+affected, err := engine.Where(...).Delete(&user)
+// DELETE FROM user Where ...
+
+affected, err := engine.ID(2).Delete(&user)
+// DELETE FROM user Where id = ?
+```
+
+* `Count` counts records.
+
+```Go
+counts, err := engine.Count(&user)
+// SELECT count(*) AS total FROM user
+```
+
+* `FindAndCount` combines `Find` with `Count`; it is commonly used in paging queries (see the paging sketch below).
+
+```Go
+var users []User
+counts, err := engine.FindAndCount(&users)
+```
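+
+A rough paging sketch built on `FindAndCount` (the `page` and `pageSize` values are illustrative, not part of the API):
+
+```Go
+page, pageSize := 2, 20
+var users []User
+total, err := engine.Limit(pageSize, (page-1)*pageSize).FindAndCount(&users)
+// users holds the requested page; total holds the unpaged row count
+```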
+
+* `Sum` computes sums.
+
+```Go
+agesFloat64, err := engine.Sum(&user, "age")
+// SELECT sum(age) AS total FROM user
+
+agesInt64, err := engine.SumInt(&user, "age")
+// SELECT sum(age) AS total FROM user
+
+sumFloat64Slice, err := engine.Sums(&user, "age", "score")
+// SELECT sum(age), sum(score) FROM user
+
+sumInt64Slice, err := engine.SumsInt(&user, "age", "score")
+// SELECT sum(age), sum(score) FROM user
+```
+
+* Query conditions builder
+
+```Go
+err := engine.Where(builder.NotIn("a", 1, 2).And(builder.In("b", "c", "d", "e"))).Find(&users)
+// SELECT id, name ... FROM user WHERE a NOT IN (?, ?) AND b IN (?, ?, ?)
+```
+
+* Multiple operations in one goroutine, with no transaction here, but reusing the session's memory:
+
+```Go
+session := engine.NewSession()
+defer session.Close()
+
+user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()}
+if _, err := session.Insert(&user1); err != nil {
+    return err
+}
+
+user2 := Userinfo{Username: "yyy"}
+if _, err := session.Where("id = ?", 2).Update(&user2); err != nil {
+    return err
+}
+
+if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil {
+    return err
+}
+
+return nil
+```
+
+* A transaction must stay on a single goroutine. Here there is a transaction, again reusing the session's memory:
+
+```Go
+session := engine.NewSession()
+defer session.Close()
+
+// add Begin() before any action
+if err := session.Begin(); err != nil {
+    // if an error is returned here, the transaction rolls back automatically
+    return err
+}
+
+user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()}
+if _, err := session.Insert(&user1); err != nil {
+    return err
+}
+
+user2 := Userinfo{Username: "yyy"}
+if _, err := session.Where("id = ?", 2).Update(&user2); err != nil {
+    return err
+}
+
+if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil {
+    return err
+}
+
+// add Commit() after all actions
+return session.Commit()
+```
+
+* Or you can use `Transaction` to replace the code above:
+
+```Go
+res, err := engine.Transaction(func(session *xorm.Session) (interface{}, error) {
+    user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()}
+    if _, err := session.Insert(&user1); err != nil {
+        return nil, err
+    }
+
+    user2 := Userinfo{Username: "yyy"}
+    if _, err := session.Where("id = ?", 2).Update(&user2); err != nil {
+        return nil, err
+    }
+
+    if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil {
+        return nil, err
+    }
+    return nil, nil
+})
+```
+
+* Context cache: if enabled, the current query result is cached on the session and reused by the next identical statement on the same session.
+
+```Go
+	sess := engine.NewSession()
+	defer sess.Close()
+
+	var context = xorm.NewMemoryContextCache()
+
+	var c2 ContextGetStruct
+	has, err := sess.ID(1).ContextCache(context).Get(&c2)
+	assert.NoError(t, err)
+	assert.True(t, has)
+	assert.EqualValues(t, 1, c2.Id)
+	assert.EqualValues(t, "1", c2.Name)
+	sql, args := sess.LastSQL()
+	assert.True(t, len(sql) > 0)
+	assert.True(t, len(args) > 0)
+
+	var c3 ContextGetStruct
+	has, err = sess.ID(1).ContextCache(context).Get(&c3)
+	assert.NoError(t, err)
+	assert.True(t, has)
+	assert.EqualValues(t, 1, c3.Id)
+	assert.EqualValues(t, "1", c3.Name)
+	sql, args = sess.LastSQL()
+	assert.True(t, len(sql) == 0)
+	assert.True(t, len(args) == 0)
+```
+
+## Contributing
+
+If you want to open a pull request, please see [CONTRIBUTING](https://gitea.com/xorm/xorm/src/branch/master/CONTRIBUTING.md). You can also go to [Xorm on discourse](https://xorm.discourse.group) to discuss.
+
+## Credits
+
+### Contributors
+
+This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
+
+
+### Backers
+
+Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/xorm#backer)]
+
+
+
+### Sponsors
+
+Support this project by becoming a sponsor. Your logo will show up here with a link to your website.
[[Become a sponsor](https://opencollective.com/xorm#sponsor)] + +## Changelog + +You can find all the changelog [here](CHANGELOG.md) + +## Cases + +* [studygolang](http://studygolang.com/) - [github.com/studygolang/studygolang](https://github.com/studygolang/studygolang) + +* [Gitea](http://gitea.io) - [github.com/go-gitea/gitea](http://github.com/go-gitea/gitea) + +* [Gogs](http://try.gogits.org) - [github.com/gogits/gogs](http://github.com/gogits/gogs) + +* [grafana](https://grafana.com/) - [github.com/grafana/grafana](http://github.com/grafana/grafana) + +* [github.com/m3ng9i/qreader](https://github.com/m3ng9i/qreader) + +* [Wego](http://github.com/go-tango/wego) + +* [Docker.cn](https://docker.cn/) + +* [Xorm Adapter](https://github.com/casbin/xorm-adapter) for [Casbin](https://github.com/casbin/casbin) - [github.com/casbin/xorm-adapter](https://github.com/casbin/xorm-adapter) + +* [Gorevel](http://gorevel.cn/) - [github.com/goofcc/gorevel](http://github.com/goofcc/gorevel) + +* [Gowalker](http://gowalker.org) - [github.com/Unknwon/gowalker](http://github.com/Unknwon/gowalker) + +* [Gobuild.io](http://gobuild.io) - [github.com/shxsun/gobuild](http://github.com/shxsun/gobuild) + +* [Sudo China](http://sudochina.com) - [github.com/insionng/toropress](http://github.com/insionng/toropress) + +* [Godaily](http://godaily.org) - [github.com/govc/godaily](http://github.com/govc/godaily) + +* [YouGam](http://www.yougam.com/) + +* [GoCMS - github.com/zzboy/GoCMS](https://github.com/zzdboy/GoCMS) + +* [GoBBS - gobbs.domolo.com](http://gobbs.domolo.com/) + +* [go-blog](http://wangcheng.me) - [github.com/easykoo/go-blog](https://github.com/easykoo/go-blog) + +## LICENSE + +BSD License [http://creativecommons.org/licenses/BSD/](http://creativecommons.org/licenses/BSD/) diff --git a/vendor/xorm.io/xorm/README_CN.md b/vendor/xorm.io/xorm/README_CN.md new file mode 100644 index 0000000..80245dd --- /dev/null +++ b/vendor/xorm.io/xorm/README_CN.md @@ -0,0 +1,474 @@ +# xorm + +[English](https://gitea.com/xorm/xorm/src/branch/master/README.md) + +xorm 是一个简单而强大的Go语言ORM库. 通过它可以使数据库操作非常简便。 + +[![Build Status](https://drone.gitea.com/api/badges/xorm/xorm/status.svg)](https://drone.gitea.com/xorm/xorm) [![](http://gocover.io/_badge/xorm.io/xorm)](https://gocover.io/xorm.io/xorm) [![](https://goreportcard.com/badge/xorm.io/xorm)](https://goreportcard.com/report/xorm.io/xorm) [![Join the chat at https://img.shields.io/discord/323460943201959939.svg](https://img.shields.io/discord/323460943201959939.svg)](https://discord.gg/HuR2CF3) + +## Notice + +v1.0.0 相对于 v0.8.2 有以下不兼容的变更: + +- 移除了部分不符合Go语言命名的函数,如 `Id`, `Sql`,请使用 `ID`, `SQL` 替代。 +- 删除了对 `xorm.io/core` 的依赖。大部分代码迁移到了 `xorm.io/xorm/core`, `xorm.io/xorm/names`, `xorm.io/xorm/schemas` 等等几个包中. +- 重命名了几个结构体,如: `core.IMapper` -> `names.Mapper`, `core.ILogger` -> `log.Logger`. 
+ +## 特性 + +* 支持 Struct 和数据库表之间的灵活映射,并支持自动同步 +* 事务支持 +* 同时支持原始SQL语句和ORM操作的混合执行 +* 使用连写来简化调用 +* 支持使用ID, In, Where, Limit, Join, Having, Table, SQL, Cols等函数和结构体等方式作为条件 +* 支持级联加载Struct +* Schema支持(仅Postgres) +* 支持缓存 +* 通过 [xorm.io/reverse](https://xorm.io/reverse) 支持根据数据库自动生成 xorm 结构体 +* 支持记录版本(即乐观锁) +* 通过 [xorm.io/builder](https://xorm.io/builder) 内置 SQL Builder 支持 +* 上下文缓存支持 +* 支持日志上下文 + +## 驱动支持 + +目前支持的Go数据库驱动和对应的数据库如下: + +* [Mysql5.*](https://github.com/mysql/mysql-server/tree/5.7) / [Mysql8.*](https://github.com/mysql/mysql-server) / [Mariadb](https://github.com/MariaDB/server) / [Tidb](https://github.com/pingcap/tidb) + - [github.com/go-sql-driver/mysql](https://github.com/go-sql-driver/mysql) + - [github.com/ziutek/mymysql/godrv](https://github.com/ziutek/mymysql/godrv) + +* [Postgres](https://github.com/postgres/postgres) / [Cockroach](https://github.com/cockroachdb/cockroach) + - [github.com/lib/pq](https://github.com/lib/pq) + +* [SQLite](https://sqlite.org) + - [github.com/mattn/go-sqlite3](https://github.com/mattn/go-sqlite3) + +* MsSql + - [github.com/denisenkom/go-mssqldb](https://github.com/denisenkom/go-mssqldb) + +* Oracle + - [github.com/mattn/go-oci8](https://github.com/mattn/go-oci8) (试验性支持) + +## 安装 + + go get xorm.io/xorm + +## 文档 + +* [操作指南](http://xorm.io/docs) + +* [Godoc代码文档](http://pkg.go.dev/xorm.io/xorm) + +# 快速开始 + +* 第一步创建引擎,driverName, dataSourceName和database/sql接口相同 + +```Go +engine, err := xorm.NewEngine(driverName, dataSourceName) +``` + +* 定义一个和表同步的结构体,并且自动同步结构体到数据库 + +```Go +type User struct { + Id int64 + Name string + Salt string + Age int + Passwd string `xorm:"varchar(200)"` + Created time.Time `xorm:"created"` + Updated time.Time `xorm:"updated"` +} + +err := engine.Sync2(new(User)) +``` + +* 创建Engine组 + +```Go +dataSourceNameSlice := []string{masterDataSourceName, slave1DataSourceName, slave2DataSourceName} +engineGroup, err := xorm.NewEngineGroup(driverName, dataSourceNameSlice) +``` + +```Go +masterEngine, err := xorm.NewEngine(driverName, masterDataSourceName) +slave1Engine, err := xorm.NewEngine(driverName, slave1DataSourceName) +slave2Engine, err := xorm.NewEngine(driverName, slave2DataSourceName) +engineGroup, err := xorm.NewEngineGroup(masterEngine, []*Engine{slave1Engine, slave2Engine}) +``` + +所有使用 `engine` 都可以简单的用 `engineGroup` 来替换。 + +* `Query` 最原始的也支持SQL语句查询,返回的结果类型为 []map[string][]byte。`QueryString` 返回 []map[string]string, `QueryInterface` 返回 `[]map[string]interface{}`. + +```Go +results, err := engine.Query("select * from user") +results, err := engine.Where("a = 1").Query() + +results, err := engine.QueryString("select * from user") +results, err := engine.Where("a = 1").QueryString() + +results, err := engine.QueryInterface("select * from user") +results, err := engine.Where("a = 1").QueryInterface() +``` + +* `Exec` 执行一个SQL语句 + +```Go +affected, err := engine.Exec("update user set age = ? 
where name = ?", age, name) +``` + +* `Insert` 插入一条或者多条记录 + +```Go +affected, err := engine.Insert(&user) +// INSERT INTO struct () values () + +affected, err := engine.Insert(&user1, &user2) +// INSERT INTO struct1 () values () +// INSERT INTO struct2 () values () + +affected, err := engine.Insert(&users) +// INSERT INTO struct () values (),(),() + +affected, err := engine.Insert(&user1, &users) +// INSERT INTO struct1 () values () +// INSERT INTO struct2 () values (),(),() +``` + +* `Get` 查询单条记录 + +```Go +has, err := engine.Get(&user) +// SELECT * FROM user LIMIT 1 + +has, err := engine.Where("name = ?", name).Desc("id").Get(&user) +// SELECT * FROM user WHERE name = ? ORDER BY id DESC LIMIT 1 + +var name string +has, err := engine.Table(&user).Where("id = ?", id).Cols("name").Get(&name) +// SELECT name FROM user WHERE id = ? + +var id int64 +has, err := engine.Table(&user).Where("name = ?", name).Cols("id").Get(&id) +has, err := engine.SQL("select id from user").Get(&id) +// SELECT id FROM user WHERE name = ? + +var valuesMap = make(map[string]string) +has, err := engine.Table(&user).Where("id = ?", id).Get(&valuesMap) +// SELECT * FROM user WHERE id = ? + +var valuesSlice = make([]interface{}, len(cols)) +has, err := engine.Table(&user).Where("id = ?", id).Cols(cols...).Get(&valuesSlice) +// SELECT col1, col2, col3 FROM user WHERE id = ? +``` + +* `Exist` 检测记录是否存在 + +```Go +has, err := testEngine.Exist(new(RecordExist)) +// SELECT * FROM record_exist LIMIT 1 + +has, err = testEngine.Exist(&RecordExist{ + Name: "test1", + }) +// SELECT * FROM record_exist WHERE name = ? LIMIT 1 + +has, err = testEngine.Where("name = ?", "test1").Exist(&RecordExist{}) +// SELECT * FROM record_exist WHERE name = ? LIMIT 1 + +has, err = testEngine.SQL("select * from record_exist where name = ?", "test1").Exist() +// select * from record_exist where name = ? + +has, err = testEngine.Table("record_exist").Exist() +// SELECT * FROM record_exist LIMIT 1 + +has, err = testEngine.Table("record_exist").Where("name = ?", "test1").Exist() +// SELECT * FROM record_exist WHERE name = ? LIMIT 1 +``` + +* `Find` 查询多条记录,当然可以使用Join和extends来组合使用 + +```Go +var users []User +err := engine.Where("name = ?", name).And("age > 10").Limit(10, 0).Find(&users) +// SELECT * FROM user WHERE name = ? AND age > 10 limit 10 offset 0 + +type Detail struct { + Id int64 + UserId int64 `xorm:"index"` +} + +type UserDetail struct { + User `xorm:"extends"` + Detail `xorm:"extends"` +} + +var users []UserDetail +err := engine.Table("user").Select("user.*, detail.*") + Join("INNER", "detail", "detail.user_id = user.id"). + Where("user.name = ?", name).Limit(10, 0). + Find(&users) +// SELECT user.*, detail.* FROM user INNER JOIN detail WHERE user.name = ? limit 10 offset 0 +``` + +* `Iterate` 和 `Rows` 根据条件遍历数据库,可以有两种方式: Iterate and Rows + +```Go +err := engine.Iterate(&User{Name:name}, func(idx int, bean interface{}) error { + user := bean.(*User) + return nil +}) +// SELECT * FROM user + +err := engine.BufferSize(100).Iterate(&User{Name:name}, func(idx int, bean interface{}) error { + user := bean.(*User) + return nil +}) +// SELECT * FROM user Limit 0, 100 +// SELECT * FROM user Limit 101, 100 + +rows, err := engine.Rows(&User{Name:name}) +// SELECT * FROM user +defer rows.Close() +bean := new(Struct) +for rows.Next() { + err = rows.Scan(bean) +} +``` + +* `Update` 更新数据,除非使用Cols,AllCols函数指明,默认只更新非空和非0的字段 + +```Go +affected, err := engine.ID(1).Update(&user) +// UPDATE user SET ... Where id = ? 
+ +affected, err := engine.Update(&user, &User{Name:name}) +// UPDATE user SET ... Where name = ? + +var ids = []int64{1, 2, 3} +affected, err := engine.In(ids).Update(&user) +// UPDATE user SET ... Where id IN (?, ?, ?) + +// force update indicated columns by Cols +affected, err := engine.ID(1).Cols("age").Update(&User{Name:name, Age: 12}) +// UPDATE user SET age = ?, updated=? Where id = ? + +// force NOT update indicated columns by Omit +affected, err := engine.ID(1).Omit("name").Update(&User{Name:name, Age: 12}) +// UPDATE user SET age = ?, updated=? Where id = ? + +affected, err := engine.ID(1).AllCols().Update(&user) +// UPDATE user SET name=?,age=?,salt=?,passwd=?,updated=? Where id = ? +``` + +* `Delete` 删除记录,需要注意,删除必须至少有一个条件,否则会报错。要清空数据库可以用EmptyTable + +```Go +affected, err := engine.Where(...).Delete(&user) +// DELETE FROM user Where ... + +affected, err := engine.ID(2).Delete(&user) +// DELETE FROM user Where id = ? +``` + +* `Count` 获取记录条数 + +```Go +counts, err := engine.Count(&user) +// SELECT count(*) AS total FROM user +``` + +* `Sum` 求和函数 + +```Go +agesFloat64, err := engine.Sum(&user, "age") +// SELECT sum(age) AS total FROM user + +agesInt64, err := engine.SumInt(&user, "age") +// SELECT sum(age) AS total FROM user + +sumFloat64Slice, err := engine.Sums(&user, "age", "score") +// SELECT sum(age), sum(score) FROM user + +sumInt64Slice, err := engine.SumsInt(&user, "age", "score") +// SELECT sum(age), sum(score) FROM user +``` + +* 条件编辑器 + +```Go +err := engine.Where(builder.NotIn("a", 1, 2).And(builder.In("b", "c", "d", "e"))).Find(&users) +// SELECT id, name ... FROM user WHERE a NOT IN (?, ?) AND b IN (?, ?, ?) +``` + +* 在一个Go程中多次操作数据库,但没有事务 + +```Go +session := engine.NewSession() +defer session.Close() + +user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()} +if _, err := session.Insert(&user1); err != nil { + return err +} + +user2 := Userinfo{Username: "yyy"} +if _, err := session.Where("id = ?", 2).Update(&user2); err != nil { + return err +} + +if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil { + return err +} + +return nil +``` + +* 在一个Go程中有事务 + +```Go +session := engine.NewSession() +defer session.Close() + +// add Begin() before any action +if err := session.Begin(); err != nil { + // if returned then will rollback automatically + return err +} + +user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()} +if _, err := session.Insert(&user1); err != nil { + return err +} + +user2 := Userinfo{Username: "yyy"} +if _, err := session.Where("id = ?", 2).Update(&user2); err != nil { + return err +} + +if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil { + return err +} + +// add Commit() after all actions +return session.Commit() +``` + +* 事务的简写方法 + +```Go +res, err := engine.Transaction(func(session *xorm.Session) (interface{}, error) { + user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()} + if _, err := session.Insert(&user1); err != nil { + return nil, err + } + + user2 := Userinfo{Username: "yyy"} + if _, err := session.Where("id = ?", 2).Update(&user2); err != nil { + return nil, err + } + + if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil { + return nil, err + } + return nil, nil +}) +``` + +* 上下文缓存,如果启用,那么针对单个对象的查询将会被缓存到系统中,可以被下一个查询使用。 + +```Go + sess := engine.NewSession() + defer sess.Close() + + 
var context = xorm.NewMemoryContextCache() + + var c2 ContextGetStruct + has, err := sess.ID(1).ContextCache(context).Get(&c2) + assert.NoError(t, err) + assert.True(t, has) + assert.EqualValues(t, 1, c2.Id) + assert.EqualValues(t, "1", c2.Name) + sql, args := sess.LastSQL() + assert.True(t, len(sql) > 0) + assert.True(t, len(args) > 0) + + var c3 ContextGetStruct + has, err = sess.ID(1).ContextCache(context).Get(&c3) + assert.NoError(t, err) + assert.True(t, has) + assert.EqualValues(t, 1, c3.Id) + assert.EqualValues(t, "1", c3.Name) + sql, args = sess.LastSQL() + assert.True(t, len(sql) == 0) + assert.True(t, len(args) == 0) +``` + +## 贡献 + +如果您也想为Xorm贡献您的力量,请查看 [CONTRIBUTING](https://gitea.com/xorm/xorm/src/branch/master/CONTRIBUTING.md)。您也可以加入QQ群 技术帮助和讨论。 +群一:280360085 (已满) +群二:795010183 + +## Credits + +### Contributors + +感谢所有的贡献者. [[Contribute](CONTRIBUTING.md)]. + + +### Backers + +感谢我们所有的 backers! 🙏 [[成为 backer](https://opencollective.com/xorm#backer)] + + + +### Sponsors + +成为 sponsor 来支持 xorm。您的 logo 将会被显示并被链接到您的网站。 [[成为 sponsor](https://opencollective.com/xorm#sponsor)] + +# 案例 + +* [Gitea](http://gitea.io) - [github.com/go-gitea/gitea](http://github.com/go-gitea/gitea) + +* [Gogs](http://try.gogits.org) - [github.com/gogits/gogs](http://github.com/gogits/gogs) + +* [grafana](https://grafana.com/) - [github.com/grafana/grafana](http://github.com/grafana/grafana) + +* [Go语言中文网](http://studygolang.com/) - [github.com/studygolang/studygolang](https://github.com/studygolang/studygolang) + +* [github.com/m3ng9i/qreader](https://github.com/m3ng9i/qreader) + +* [Wego](http://github.com/go-tango/wego) + +* [Docker.cn](https://docker.cn/) + +* [Xorm Adapter](https://github.com/casbin/xorm-adapter) for [Casbin](https://github.com/casbin/casbin) - [github.com/casbin/xorm-adapter](https://github.com/casbin/xorm-adapter) + +* [Gowalker](http://gowalker.org) - [github.com/Unknwon/gowalker](http://github.com/Unknwon/gowalker) + +* [Gobuild.io](http://gobuild.io) - [github.com/shxsun/gobuild](http://github.com/shxsun/gobuild) + +* [Sudo China](http://sudochina.com) - [github.com/insionng/toropress](http://github.com/insionng/toropress) + +* [Godaily](http://godaily.org) - [github.com/govc/godaily](http://github.com/govc/godaily) + +* [YouGam](http://www.yougam.com/) + +* [GoCMS - github.com/zzboy/GoCMS](https://github.com/zzdboy/GoCMS) + +* [GoBBS - gobbs.domolo.com](http://gobbs.domolo.com/) + +* [go-blog](http://wangcheng.me) - [github.com/easykoo/go-blog](https://github.com/easykoo/go-blog) + + +## 更新日志 + +请访问 [CHANGELOG.md](CHANGELOG.md) 获得更新日志。 + +## LICENSE + +BSD License +[http://creativecommons.org/licenses/BSD/](http://creativecommons.org/licenses/BSD/) diff --git a/vendor/xorm.io/xorm/caches/cache.go b/vendor/xorm.io/xorm/caches/cache.go new file mode 100644 index 0000000..7b80eb8 --- /dev/null +++ b/vendor/xorm.io/xorm/caches/cache.go @@ -0,0 +1,99 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package caches + +import ( + "bytes" + "encoding/gob" + "errors" + "fmt" + "strings" + "time" + + "xorm.io/xorm/schemas" +) + +const ( + // CacheExpired is default cache expired time + CacheExpired = 60 * time.Minute + // CacheMaxMemory is not use now + CacheMaxMemory = 256 + // CacheGcInterval represents interval time to clear all expired nodes + CacheGcInterval = 10 * time.Minute + // CacheGcMaxRemoved represents max nodes removed when gc + CacheGcMaxRemoved = 20 +) + +// list all the errors +var ( + ErrCacheMiss = errors.New("xorm/cache: key not found") + ErrNotStored = errors.New("xorm/cache: not stored") + // ErrNotExist record does not exist error + ErrNotExist = errors.New("Record does not exist") +) + +// CacheStore is a interface to store cache +type CacheStore interface { + // key is primary key or composite primary key + // value is struct's pointer + // key format : -p--... + Put(key string, value interface{}) error + Get(key string) (interface{}, error) + Del(key string) error +} + +// Cacher is an interface to provide cache +// id format : u--... +type Cacher interface { + GetIds(tableName, sql string) interface{} + GetBean(tableName string, id string) interface{} + PutIds(tableName, sql string, ids interface{}) + PutBean(tableName string, id string, obj interface{}) + DelIds(tableName, sql string) + DelBean(tableName string, id string) + ClearIds(tableName string) + ClearBeans(tableName string) +} + +func encodeIds(ids []schemas.PK) (string, error) { + buf := new(bytes.Buffer) + enc := gob.NewEncoder(buf) + err := enc.Encode(ids) + + return buf.String(), err +} + +func decodeIds(s string) ([]schemas.PK, error) { + pks := make([]schemas.PK, 0) + + dec := gob.NewDecoder(strings.NewReader(s)) + err := dec.Decode(&pks) + + return pks, err +} + +// GetCacheSql returns cacher PKs via SQL +func GetCacheSql(m Cacher, tableName, sql string, args interface{}) ([]schemas.PK, error) { + bytes := m.GetIds(tableName, GenSqlKey(sql, args)) + if bytes == nil { + return nil, errors.New("Not Exist") + } + return decodeIds(bytes.(string)) +} + +// PutCacheSql puts cacher SQL and PKs +func PutCacheSql(m Cacher, ids []schemas.PK, tableName, sql string, args interface{}) error { + bytes, err := encodeIds(ids) + if err != nil { + return err + } + m.PutIds(tableName, GenSqlKey(sql, args), bytes) + return nil +} + +// GenSqlKey generates cache key +func GenSqlKey(sql string, args interface{}) string { + return fmt.Sprintf("%v-%v", sql, args) +} diff --git a/vendor/xorm.io/xorm/caches/encode.go b/vendor/xorm.io/xorm/caches/encode.go new file mode 100644 index 0000000..4ba3992 --- /dev/null +++ b/vendor/xorm.io/xorm/caches/encode.go @@ -0,0 +1,58 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package caches + +import ( + "bytes" + "crypto/md5" + "encoding/gob" + "encoding/json" + "fmt" + "io" +) + +// md5 hash string +func Md5(str string) string { + m := md5.New() + io.WriteString(m, str) + return fmt.Sprintf("%x", m.Sum(nil)) +} +func Encode(data interface{}) ([]byte, error) { + //return JsonEncode(data) + return GobEncode(data) +} + +func Decode(data []byte, to interface{}) error { + //return JsonDecode(data, to) + return GobDecode(data, to) +} + +func GobEncode(data interface{}) ([]byte, error) { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + err := enc.Encode(&data) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func GobDecode(data []byte, to interface{}) error { + buf := bytes.NewBuffer(data) + dec := gob.NewDecoder(buf) + return dec.Decode(to) +} + +func JsonEncode(data interface{}) ([]byte, error) { + val, err := json.Marshal(data) + if err != nil { + return nil, err + } + return val, nil +} + +func JsonDecode(data []byte, to interface{}) error { + return json.Unmarshal(data, to) +} diff --git a/vendor/xorm.io/xorm/caches/leveldb.go b/vendor/xorm.io/xorm/caches/leveldb.go new file mode 100644 index 0000000..d1a177a --- /dev/null +++ b/vendor/xorm.io/xorm/caches/leveldb.go @@ -0,0 +1,94 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package caches + +import ( + "log" + + "github.com/syndtr/goleveldb/leveldb" +) + +// LevelDBStore implements CacheStore provide local machine +type LevelDBStore struct { + store *leveldb.DB + Debug bool + v interface{} +} + +var _ CacheStore = &LevelDBStore{} + +func NewLevelDBStore(dbfile string) (*LevelDBStore, error) { + db := &LevelDBStore{} + h, err := leveldb.OpenFile(dbfile, nil) + if err != nil { + return nil, err + } + db.store = h + return db, nil +} + +func (s *LevelDBStore) Put(key string, value interface{}) error { + val, err := Encode(value) + if err != nil { + if s.Debug { + log.Println("[LevelDB]EncodeErr: ", err, "Key:", key) + } + return err + } + err = s.store.Put([]byte(key), val, nil) + if err != nil { + if s.Debug { + log.Println("[LevelDB]PutErr: ", err, "Key:", key) + } + return err + } + if s.Debug { + log.Println("[LevelDB]Put: ", key) + } + return err +} + +func (s *LevelDBStore) Get(key string) (interface{}, error) { + data, err := s.store.Get([]byte(key), nil) + if err != nil { + if s.Debug { + log.Println("[LevelDB]GetErr: ", err, "Key:", key) + } + if err == leveldb.ErrNotFound { + return nil, ErrNotExist + } + return nil, err + } + + err = Decode(data, &s.v) + if err != nil { + if s.Debug { + log.Println("[LevelDB]DecodeErr: ", err, "Key:", key) + } + return nil, err + } + if s.Debug { + log.Println("[LevelDB]Get: ", key, s.v) + } + return s.v, err +} + +func (s *LevelDBStore) Del(key string) error { + err := s.store.Delete([]byte(key), nil) + if err != nil { + if s.Debug { + log.Println("[LevelDB]DelErr: ", err, "Key:", key) + } + return err + } + if s.Debug { + log.Println("[LevelDB]Del: ", key) + } + return err +} + +func (s *LevelDBStore) Close() { + s.store.Close() +} diff --git a/vendor/xorm.io/xorm/caches/lru.go b/vendor/xorm.io/xorm/caches/lru.go new file mode 100644 index 0000000..6b45ac9 --- /dev/null +++ b/vendor/xorm.io/xorm/caches/lru.go @@ -0,0 +1,282 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package caches
+
+import (
+	"container/list"
+	"fmt"
+	"sync"
+	"time"
+)
+
+// LRUCacher implements cache object facilities
+type LRUCacher struct {
+	idList         *list.List
+	sqlList        *list.List
+	idIndex        map[string]map[string]*list.Element
+	sqlIndex       map[string]map[string]*list.Element
+	store          CacheStore
+	mutex          sync.Mutex
+	MaxElementSize int
+	Expired        time.Duration
+	GcInterval     time.Duration
+}
+
+// NewLRUCacher creates a cacher
+func NewLRUCacher(store CacheStore, maxElementSize int) *LRUCacher {
+	return NewLRUCacher2(store, 3600*time.Second, maxElementSize)
+}
+
+// NewLRUCacher2 creates a cacher with the given expiration and max size
+func NewLRUCacher2(store CacheStore, expired time.Duration, maxElementSize int) *LRUCacher {
+	cacher := &LRUCacher{store: store, idList: list.New(),
+		sqlList: list.New(), Expired: expired,
+		GcInterval: CacheGcInterval, MaxElementSize: maxElementSize,
+		sqlIndex: make(map[string]map[string]*list.Element),
+		idIndex:  make(map[string]map[string]*list.Element),
+	}
+	cacher.RunGC()
+	return cacher
+}
+
+// RunGC runs the GC once every m.GcInterval
+func (m *LRUCacher) RunGC() {
+	time.AfterFunc(m.GcInterval, func() {
+		m.RunGC()
+		m.GC()
+	})
+}
+
+// GC checks the id list and sql list and removes all expired elements
+func (m *LRUCacher) GC() {
+	m.mutex.Lock()
+	defer m.mutex.Unlock()
+	var removedNum int
+	for e := m.idList.Front(); e != nil; {
+		if removedNum <= CacheGcMaxRemoved &&
+			time.Now().Sub(e.Value.(*idNode).lastVisit) > m.Expired {
+			removedNum++
+			next := e.Next()
+			node := e.Value.(*idNode)
+			m.delBean(node.tbName, node.id)
+			e = next
+		} else {
+			break
+		}
+	}
+
+	removedNum = 0
+	for e := m.sqlList.Front(); e != nil; {
+		if removedNum <= CacheGcMaxRemoved &&
+			time.Now().Sub(e.Value.(*sqlNode).lastVisit) > m.Expired {
+			removedNum++
+			next := e.Next()
+			node := e.Value.(*sqlNode)
+			m.delIds(node.tbName, node.sql)
+			e = next
+		} else {
+			break
+		}
+	}
+}
+
+// GetIds returns all bean ids for the given sql and parameters from cache
+func (m *LRUCacher) GetIds(tableName, sql string) interface{} {
+	m.mutex.Lock()
+	defer m.mutex.Unlock()
+	if _, ok := m.sqlIndex[tableName]; !ok {
+		m.sqlIndex[tableName] = make(map[string]*list.Element)
+	}
+	if v, err := m.store.Get(sql); err == nil {
+		if el, ok := m.sqlIndex[tableName][sql]; !ok {
+			el = m.sqlList.PushBack(newSQLNode(tableName, sql))
+			m.sqlIndex[tableName][sql] = el
+		} else {
+			lastTime := el.Value.(*sqlNode).lastVisit
+			// if expired, remove the node and return nil
+			if time.Now().Sub(lastTime) > m.Expired {
+				m.delIds(tableName, sql)
+				return nil
+			}
+			m.sqlList.MoveToBack(el)
+			el.Value.(*sqlNode).lastVisit = time.Now()
+		}
+		return v
+	}
+
+	m.delIds(tableName, sql)
+	return nil
+}
+
+// GetBean returns the bean for the given tableName and id from cache
+func (m *LRUCacher) GetBean(tableName string, id string) interface{} {
+	m.mutex.Lock()
+	defer m.mutex.Unlock()
+	if _, ok := m.idIndex[tableName]; !ok {
+		m.idIndex[tableName] = make(map[string]*list.Element)
+	}
+	tid := genID(tableName, id)
+	if v, err := m.store.Get(tid); err == nil {
+		if el, ok := m.idIndex[tableName][id]; ok {
+			lastTime := el.Value.(*idNode).lastVisit
+			// if expired, remove the node and return nil
+			if time.Now().Sub(lastTime) > m.Expired {
+				m.delBean(tableName, id)
+				return nil
+			}
+			m.idList.MoveToBack(el)
+			el.Value.(*idNode).lastVisit = time.Now()
+		} else {
+			el = m.idList.PushBack(newIDNode(tableName, id))
+			m.idIndex[tableName][id] = el
+		}
+		return v
+	}
+
+	// bean does not exist in the store, so remove the in-memory index
+	m.delBean(tableName, id)
+	return nil
+}
+
+// clearIds clears all sql-ids mapping on table tableName from cache
+func (m *LRUCacher) clearIds(tableName string) {
+	if tis, ok := m.sqlIndex[tableName]; ok {
+		for sql, v := range tis {
+			m.sqlList.Remove(v)
+			m.store.Del(sql)
+		}
+	}
+	m.sqlIndex[tableName] = make(map[string]*list.Element)
+}
+
+// ClearIds clears all sql-ids mapping on table tableName from cache
+func (m *LRUCacher) ClearIds(tableName string) {
+	m.mutex.Lock()
+	m.clearIds(tableName)
+	m.mutex.Unlock()
+}
+
+func (m *LRUCacher) clearBeans(tableName string) {
+	if tis, ok := m.idIndex[tableName]; ok {
+		for id, v := range tis {
+			m.idList.Remove(v)
+			tid := genID(tableName, id)
+			m.store.Del(tid)
+		}
+	}
+	m.idIndex[tableName] = make(map[string]*list.Element)
+}
+
+// ClearBeans clears all beans in some table
+func (m *LRUCacher) ClearBeans(tableName string) {
+	m.mutex.Lock()
+	m.clearBeans(tableName)
+	m.mutex.Unlock()
+}
+
+// PutIds puts ids into table
+func (m *LRUCacher) PutIds(tableName, sql string, ids interface{}) {
+	m.mutex.Lock()
+	if _, ok := m.sqlIndex[tableName]; !ok {
+		m.sqlIndex[tableName] = make(map[string]*list.Element)
+	}
+	if el, ok := m.sqlIndex[tableName][sql]; !ok {
+		el = m.sqlList.PushBack(newSQLNode(tableName, sql))
+		m.sqlIndex[tableName][sql] = el
+	} else {
+		el.Value.(*sqlNode).lastVisit = time.Now()
+	}
+	m.store.Put(sql, ids)
+	if m.sqlList.Len() > m.MaxElementSize {
+		e := m.sqlList.Front()
+		node := e.Value.(*sqlNode)
+		m.delIds(node.tbName, node.sql)
+	}
+	m.mutex.Unlock()
+}
+
+// PutBean puts beans into table
+func (m *LRUCacher) PutBean(tableName string, id string, obj interface{}) {
+	m.mutex.Lock()
+	var el *list.Element
+	var ok bool
+
+	if el, ok = m.idIndex[tableName][id]; !ok {
+		el = m.idList.PushBack(newIDNode(tableName, id))
+		m.idIndex[tableName][id] = el
+	} else {
+		el.Value.(*idNode).lastVisit = time.Now()
+	}
+
+	m.store.Put(genID(tableName, id), obj)
+	if m.idList.Len() > m.MaxElementSize {
+		e := m.idList.Front()
+		node := e.Value.(*idNode)
+		m.delBean(node.tbName, node.id)
+	}
+	m.mutex.Unlock()
+}
+
+func (m *LRUCacher) delIds(tableName, sql string) {
+	if _, ok := m.sqlIndex[tableName]; ok {
+		if el, ok := m.sqlIndex[tableName][sql]; ok {
+			delete(m.sqlIndex[tableName], sql)
+			m.sqlList.Remove(el)
+		}
+	}
+	m.store.Del(sql)
+}
+
+// DelIds deletes ids
+func (m *LRUCacher) DelIds(tableName, sql string) {
+	m.mutex.Lock()
+	m.delIds(tableName, sql)
+	m.mutex.Unlock()
+}
+
+func (m *LRUCacher) delBean(tableName string, id string) {
+	tid := genID(tableName, id)
+	if el, ok := m.idIndex[tableName][id]; ok {
+		delete(m.idIndex[tableName], id)
+		m.idList.Remove(el)
+		m.clearIds(tableName)
+	}
+	m.store.Del(tid)
+}
+
+// DelBean deletes beans in some table
+func (m *LRUCacher) DelBean(tableName string, id string) {
+	m.mutex.Lock()
+	m.delBean(tableName, id)
+	m.mutex.Unlock()
+}
+
+type idNode struct {
+	tbName    string
+	id        string
+	lastVisit time.Time
+}
+
+type sqlNode struct {
+	tbName    string
+	sql       string
+	lastVisit time.Time
+}
+
+func genSQLKey(sql string, args interface{}) string {
+	return fmt.Sprintf("%s-%v", sql, args)
+}
+
+func genID(prefix string, id string) string {
+	return fmt.Sprintf("%s-%s", prefix, id)
+}
+
+func newIDNode(tbName string, id string) *idNode {
+	return &idNode{tbName, id, time.Now()}
+}
+
+func newSQLNode(tbName, sql string) *sqlNode {
+	return &sqlNode{tbName, sql, time.Now()}
+}
diff --git a/vendor/xorm.io/xorm/caches/manager.go b/vendor/xorm.io/xorm/caches/manager.go
new file mode 100644
index 0000000..0504521
--- /dev/null
+++ b/vendor/xorm.io/xorm/caches/manager.go
@@ -0,0 +1,56 @@
+// Copyright 2020 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package caches
+
+import "sync"
+
+type Manager struct {
+	cacher             Cacher
+	disableGlobalCache bool
+
+	cachers    map[string]Cacher
+	cacherLock sync.RWMutex
+}
+
+func NewManager() *Manager {
+	return &Manager{
+		cachers: make(map[string]Cacher),
+	}
+}
+
+// SetDisableGlobalCache enables or disables the global cache
+func (mgr *Manager) SetDisableGlobalCache(disable bool) {
+	if mgr.disableGlobalCache != disable {
+		mgr.disableGlobalCache = disable
+	}
+}
+
+func (mgr *Manager) SetCacher(tableName string, cacher Cacher) {
+	mgr.cacherLock.Lock()
+	mgr.cachers[tableName] = cacher
+	mgr.cacherLock.Unlock()
+}
+
+func (mgr *Manager) GetCacher(tableName string) Cacher {
+	var cacher Cacher
+	var ok bool
+	mgr.cacherLock.RLock()
+	cacher, ok = mgr.cachers[tableName]
+	mgr.cacherLock.RUnlock()
+	if !ok && !mgr.disableGlobalCache {
+		cacher = mgr.cacher
+	}
+	return cacher
+}
+
+// SetDefaultCacher sets the default cacher. By default, xorm does not enable any cacher.
+func (mgr *Manager) SetDefaultCacher(cacher Cacher) {
+	mgr.cacher = cacher
+}
+
+// GetDefaultCacher returns the default cacher
+func (mgr *Manager) GetDefaultCacher() Cacher {
+	return mgr.cacher
+}
diff --git a/vendor/xorm.io/xorm/caches/memory_store.go b/vendor/xorm.io/xorm/caches/memory_store.go
new file mode 100644
index 0000000..f16254d
--- /dev/null
+++ b/vendor/xorm.io/xorm/caches/memory_store.go
@@ -0,0 +1,49 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package caches
+
+import (
+	"sync"
+)
+
+var _ CacheStore = NewMemoryStore()
+
+// MemoryStore represents in-memory store
+type MemoryStore struct {
+	store map[interface{}]interface{}
+	mutex sync.RWMutex
+}
+
+// NewMemoryStore creates a new store in memory
+func NewMemoryStore() *MemoryStore {
+	return &MemoryStore{store: make(map[interface{}]interface{})}
+}
+
+// Put puts object into store
+func (s *MemoryStore) Put(key string, value interface{}) error {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.store[key] = value
+	return nil
+}
+
+// Get gets object from store
+func (s *MemoryStore) Get(key string) (interface{}, error) {
+	s.mutex.RLock()
+	defer s.mutex.RUnlock()
+	if v, ok := s.store[key]; ok {
+		return v, nil
+	}
+
+	return nil, ErrNotExist
+}
+
+// Del deletes object
+func (s *MemoryStore) Del(key string) error {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	delete(s.store, key)
+	return nil
+}
diff --git a/vendor/xorm.io/xorm/contexts/context_cache.go b/vendor/xorm.io/xorm/contexts/context_cache.go
new file mode 100644
index 0000000..0d0f0f0
--- /dev/null
+++ b/vendor/xorm.io/xorm/contexts/context_cache.go
@@ -0,0 +1,30 @@
+// Copyright 2018 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package contexts
+
+// ContextCache is the interface that operates the cache data.
+type ContextCache interface {
+	// Put puts value into cache with key.
+	Put(key string, val interface{})
+	// Get gets cached value by given key.
+	Get(key string) interface{}
+}
+
+type memoryContextCache map[string]interface{}
+
+// NewMemoryContextCache returns a new memoryContextCache
+func NewMemoryContextCache() memoryContextCache {
+	return make(map[string]interface{})
+}
+
+// Put puts value into cache with key.
+func (m memoryContextCache) Put(key string, val interface{}) {
+	m[key] = val
+}
+
+// Get gets cached value by given key.
+func (m memoryContextCache) Get(key string) interface{} {
+	return m[key]
+}
diff --git a/vendor/xorm.io/xorm/contexts/hook.go b/vendor/xorm.io/xorm/contexts/hook.go
new file mode 100644
index 0000000..71ad8e8
--- /dev/null
+++ b/vendor/xorm.io/xorm/contexts/hook.go
@@ -0,0 +1,75 @@
+// Copyright 2020 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package contexts
+
+import (
+	"context"
+	"database/sql"
+	"time"
+)
+
+// ContextHook represents a hook context
+type ContextHook struct {
+	start       time.Time
+	Ctx         context.Context
+	SQL         string        // log content or SQL
+	Args        []interface{} // if it's a SQL, it's the arguments
+	Result      sql.Result
+	ExecuteTime time.Duration
+	Err         error // SQL executed error
+}
+
+// NewContextHook returns a new hook context
+func NewContextHook(ctx context.Context, sql string, args []interface{}) *ContextHook {
+	return &ContextHook{
+		start: time.Now(),
+		Ctx:   ctx,
+		SQL:   sql,
+		Args:  args,
+	}
+}
+
+func (c *ContextHook) End(ctx context.Context, result sql.Result, err error) {
+	c.Ctx = ctx
+	c.Result = result
+	c.Err = err
+	c.ExecuteTime = time.Now().Sub(c.start)
+}
+
+type Hook interface {
+	BeforeProcess(c *ContextHook) (context.Context, error)
+	AfterProcess(c *ContextHook) error
+}
+
+type Hooks struct {
+	hooks []Hook
+}
+
+func (h *Hooks) AddHook(hooks ...Hook) {
+	h.hooks = append(h.hooks, hooks...)
+}
+
+func (h *Hooks) BeforeProcess(c *ContextHook) (context.Context, error) {
+	ctx := c.Ctx
+	for _, h := range h.hooks {
+		var err error
+		ctx, err = h.BeforeProcess(c)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return ctx, nil
+}
+
+func (h *Hooks) AfterProcess(c *ContextHook) error {
+	firstErr := c.Err
+	for _, h := range h.hooks {
+		err := h.AfterProcess(c)
+		if err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+	return firstErr
+}
diff --git a/vendor/xorm.io/xorm/convert.go b/vendor/xorm.io/xorm/convert.go
new file mode 100644
index 0000000..c19d30e
--- /dev/null
+++ b/vendor/xorm.io/xorm/convert.go
@@ -0,0 +1,422 @@
+// Copyright 2017 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
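ContextHook/Hooks form the extension point that every statement in the core package (further down in this patch) is funneled through: BeforeProcess runs before execution, End records the duration, AfterProcess runs after. A sketch of a custom hook; the type name is made up, and in practice it would be registered via (*core.DB).AddHook, also defined later in this patch:

package main

import (
	"context"
	"fmt"

	"xorm.io/xorm/contexts"
)

// timingHook is an illustrative Hook that prints each statement with its
// arguments, measured duration and error once it has run.
type timingHook struct{}

// BeforeProcess runs before the statement; nothing to do here, so it just
// passes the context through unchanged.
func (timingHook) BeforeProcess(c *contexts.ContextHook) (context.Context, error) {
	return c.Ctx, nil
}

// AfterProcess runs after the statement; ExecuteTime was filled in by End.
func (timingHook) AfterProcess(c *contexts.ContextHook) error {
	fmt.Printf("%s %v took %v (err: %v)\n", c.SQL, c.Args, c.ExecuteTime, c.Err)
	return nil
}

func main() {
	var hs contexts.Hooks
	hs.AddHook(timingHook{}) // normally wired up via (*core.DB).AddHook instead
	_ = hs
}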
+ +package xorm + +import ( + "database/sql/driver" + "errors" + "fmt" + "reflect" + "strconv" + "time" +) + +var errNilPtr = errors.New("destination pointer is nil") // embedded in descriptive error + +func strconvErr(err error) error { + if ne, ok := err.(*strconv.NumError); ok { + return ne.Err + } + return err +} + +func cloneBytes(b []byte) []byte { + if b == nil { + return nil + } + c := make([]byte, len(b)) + copy(c, b) + return c +} + +func asString(src interface{}) string { + switch v := src.(type) { + case string: + return v + case []byte: + return string(v) + } + rv := reflect.ValueOf(src) + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(rv.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return strconv.FormatUint(rv.Uint(), 10) + case reflect.Float64: + return strconv.FormatFloat(rv.Float(), 'g', -1, 64) + case reflect.Float32: + return strconv.FormatFloat(rv.Float(), 'g', -1, 32) + case reflect.Bool: + return strconv.FormatBool(rv.Bool()) + } + return fmt.Sprintf("%v", src) +} + +func asBytes(buf []byte, rv reflect.Value) (b []byte, ok bool) { + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.AppendInt(buf, rv.Int(), 10), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return strconv.AppendUint(buf, rv.Uint(), 10), true + case reflect.Float32: + return strconv.AppendFloat(buf, rv.Float(), 'g', -1, 32), true + case reflect.Float64: + return strconv.AppendFloat(buf, rv.Float(), 'g', -1, 64), true + case reflect.Bool: + return strconv.AppendBool(buf, rv.Bool()), true + case reflect.String: + s := rv.String() + return append(buf, s...), true + } + return +} + +// convertAssign copies to dest the value in src, converting it if possible. +// An error is returned if the copy would result in loss of information. +// dest should be a pointer type. +func convertAssign(dest, src interface{}) error { + // Common cases, without reflect. 
+ switch s := src.(type) { + case string: + switch d := dest.(type) { + case *string: + if d == nil { + return errNilPtr + } + *d = s + return nil + case *[]byte: + if d == nil { + return errNilPtr + } + *d = []byte(s) + return nil + } + case []byte: + switch d := dest.(type) { + case *string: + if d == nil { + return errNilPtr + } + *d = string(s) + return nil + case *interface{}: + if d == nil { + return errNilPtr + } + *d = cloneBytes(s) + return nil + case *[]byte: + if d == nil { + return errNilPtr + } + *d = cloneBytes(s) + return nil + } + + case time.Time: + switch d := dest.(type) { + case *string: + *d = s.Format(time.RFC3339Nano) + return nil + case *[]byte: + if d == nil { + return errNilPtr + } + *d = []byte(s.Format(time.RFC3339Nano)) + return nil + } + case nil: + switch d := dest.(type) { + case *interface{}: + if d == nil { + return errNilPtr + } + *d = nil + return nil + case *[]byte: + if d == nil { + return errNilPtr + } + *d = nil + return nil + } + } + + var sv reflect.Value + + switch d := dest.(type) { + case *string: + sv = reflect.ValueOf(src) + switch sv.Kind() { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + *d = asString(src) + return nil + } + case *[]byte: + sv = reflect.ValueOf(src) + if b, ok := asBytes(nil, sv); ok { + *d = b + return nil + } + case *bool: + bv, err := driver.Bool.ConvertValue(src) + if err == nil { + *d = bv.(bool) + } + return err + case *interface{}: + *d = src + return nil + } + + dpv := reflect.ValueOf(dest) + if dpv.Kind() != reflect.Ptr { + return errors.New("destination not a pointer") + } + if dpv.IsNil() { + return errNilPtr + } + + if !sv.IsValid() { + sv = reflect.ValueOf(src) + } + + dv := reflect.Indirect(dpv) + if sv.IsValid() && sv.Type().AssignableTo(dv.Type()) { + switch b := src.(type) { + case []byte: + dv.Set(reflect.ValueOf(cloneBytes(b))) + default: + dv.Set(sv) + } + return nil + } + + if dv.Kind() == sv.Kind() && sv.Type().ConvertibleTo(dv.Type()) { + dv.Set(sv.Convert(dv.Type())) + return nil + } + + switch dv.Kind() { + case reflect.Ptr: + if src == nil { + dv.Set(reflect.Zero(dv.Type())) + return nil + } + + dv.Set(reflect.New(dv.Type().Elem())) + return convertAssign(dv.Interface(), src) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s := asString(src) + i64, err := strconv.ParseInt(s, 10, dv.Type().Bits()) + if err != nil { + err = strconvErr(err) + return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err) + } + dv.SetInt(i64) + return nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + s := asString(src) + u64, err := strconv.ParseUint(s, 10, dv.Type().Bits()) + if err != nil { + err = strconvErr(err) + return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err) + } + dv.SetUint(u64) + return nil + case reflect.Float32, reflect.Float64: + s := asString(src) + f64, err := strconv.ParseFloat(s, dv.Type().Bits()) + if err != nil { + err = strconvErr(err) + return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err) + } + dv.SetFloat(f64) + return nil + case reflect.String: + dv.SetString(asString(src)) + return nil + } + + return fmt.Errorf("unsupported Scan, storing driver.Value type %T into type %T", src, dest) +} + +func asKind(vv reflect.Value, tp reflect.Type) 
(interface{}, error) { + switch tp.Kind() { + case reflect.Int64: + return vv.Int(), nil + case reflect.Int: + return int(vv.Int()), nil + case reflect.Int32: + return int32(vv.Int()), nil + case reflect.Int16: + return int16(vv.Int()), nil + case reflect.Int8: + return int8(vv.Int()), nil + case reflect.Uint64: + return vv.Uint(), nil + case reflect.Uint: + return uint(vv.Uint()), nil + case reflect.Uint32: + return uint32(vv.Uint()), nil + case reflect.Uint16: + return uint16(vv.Uint()), nil + case reflect.Uint8: + return uint8(vv.Uint()), nil + case reflect.String: + return vv.String(), nil + case reflect.Slice: + if tp.Elem().Kind() == reflect.Uint8 { + v, err := strconv.ParseInt(string(vv.Interface().([]byte)), 10, 64) + if err != nil { + return nil, err + } + return v, nil + } + + } + return nil, fmt.Errorf("unsupported primary key type: %v, %v", tp, vv) +} + +func asBool(bs []byte) (bool, error) { + if len(bs) == 0 { + return false, nil + } + if bs[0] == 0x00 { + return false, nil + } else if bs[0] == 0x01 { + return true, nil + } + return strconv.ParseBool(string(bs)) +} + +// str2PK convert string value to primary key value according to tp +func str2PKValue(s string, tp reflect.Type) (reflect.Value, error) { + var err error + var result interface{} + var defReturn = reflect.Zero(tp) + + switch tp.Kind() { + case reflect.Int: + result, err = strconv.Atoi(s) + if err != nil { + return defReturn, fmt.Errorf("convert %s as int: %s", s, err.Error()) + } + case reflect.Int8: + x, err := strconv.Atoi(s) + if err != nil { + return defReturn, fmt.Errorf("convert %s as int8: %s", s, err.Error()) + } + result = int8(x) + case reflect.Int16: + x, err := strconv.Atoi(s) + if err != nil { + return defReturn, fmt.Errorf("convert %s as int16: %s", s, err.Error()) + } + result = int16(x) + case reflect.Int32: + x, err := strconv.Atoi(s) + if err != nil { + return defReturn, fmt.Errorf("convert %s as int32: %s", s, err.Error()) + } + result = int32(x) + case reflect.Int64: + result, err = strconv.ParseInt(s, 10, 64) + if err != nil { + return defReturn, fmt.Errorf("convert %s as int64: %s", s, err.Error()) + } + case reflect.Uint: + x, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return defReturn, fmt.Errorf("convert %s as uint: %s", s, err.Error()) + } + result = uint(x) + case reflect.Uint8: + x, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return defReturn, fmt.Errorf("convert %s as uint8: %s", s, err.Error()) + } + result = uint8(x) + case reflect.Uint16: + x, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return defReturn, fmt.Errorf("convert %s as uint16: %s", s, err.Error()) + } + result = uint16(x) + case reflect.Uint32: + x, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return defReturn, fmt.Errorf("convert %s as uint32: %s", s, err.Error()) + } + result = uint32(x) + case reflect.Uint64: + result, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return defReturn, fmt.Errorf("convert %s as uint64: %s", s, err.Error()) + } + case reflect.String: + result = s + default: + return defReturn, errors.New("unsupported convert type") + } + return reflect.ValueOf(result).Convert(tp), nil +} + +func str2PK(s string, tp reflect.Type) (interface{}, error) { + v, err := str2PKValue(s, tp) + if err != nil { + return nil, err + } + return v.Interface(), nil +} + +func int64ToIntValue(id int64, tp reflect.Type) reflect.Value { + var v interface{} + kind := tp.Kind() + + if kind == reflect.Ptr { + kind = tp.Elem().Kind() + } + + switch kind { + case 
reflect.Int16:
+		temp := int16(id)
+		v = &temp
+	case reflect.Int32:
+		temp := int32(id)
+		v = &temp
+	case reflect.Int:
+		temp := int(id)
+		v = &temp
+	case reflect.Int64:
+		temp := id
+		v = &temp
+	case reflect.Uint16:
+		temp := uint16(id)
+		v = &temp
+	case reflect.Uint32:
+		temp := uint32(id)
+		v = &temp
+	case reflect.Uint64:
+		temp := uint64(id)
+		v = &temp
+	case reflect.Uint:
+		temp := uint(id)
+		v = &temp
+	}
+
+	if tp.Kind() == reflect.Ptr {
+		return reflect.ValueOf(v).Convert(tp)
+	}
+	return reflect.ValueOf(v).Elem().Convert(tp)
+}
+
+func int64ToInt(id int64, tp reflect.Type) interface{} {
+	return int64ToIntValue(id, tp).Interface()
+}
diff --git a/vendor/xorm.io/xorm/convert/conversion.go b/vendor/xorm.io/xorm/convert/conversion.go
new file mode 100644
index 0000000..16f1a92
--- /dev/null
+++ b/vendor/xorm.io/xorm/convert/conversion.go
@@ -0,0 +1,12 @@
+// Copyright 2017 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package convert
+
+// Conversion is an interface. A type that implements Conversion uses its own
+// custom methods to store itself into and retrieve itself from the database.
+type Conversion interface {
+	FromDB([]byte) error
+	ToDB() ([]byte, error)
+}
diff --git a/vendor/xorm.io/xorm/core/db.go b/vendor/xorm.io/xorm/core/db.go
new file mode 100644
index 0000000..50c64c6
--- /dev/null
+++ b/vendor/xorm.io/xorm/core/db.go
@@ -0,0 +1,293 @@
+// Copyright 2019 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package core
+
+import (
+	"context"
+	"database/sql"
+	"database/sql/driver"
+	"fmt"
+	"reflect"
+	"regexp"
+	"sync"
+
+	"xorm.io/xorm/contexts"
+	"xorm.io/xorm/log"
+	"xorm.io/xorm/names"
+)
+
+var (
+	// DefaultCacheSize sets the default cache size
+	DefaultCacheSize = 200
+)
+
+func MapToSlice(query string, mp interface{}) (string, []interface{}, error) {
+	vv := reflect.ValueOf(mp)
+	if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map {
+		return "", []interface{}{}, ErrNoMapPointer
+	}
+
+	args := make([]interface{}, 0, len(vv.Elem().MapKeys()))
+	var err error
+	query = re.ReplaceAllStringFunc(query, func(src string) string {
+		v := vv.Elem().MapIndex(reflect.ValueOf(src[1:]))
+		if !v.IsValid() {
+			err = fmt.Errorf("map key %s is missing", src[1:])
+		} else {
+			args = append(args, v.Interface())
+		}
+		return "?"
+	})
+
+	return query, args, err
+}
+
+func StructToSlice(query string, st interface{}) (string, []interface{}, error) {
+	vv := reflect.ValueOf(st)
+	if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct {
+		return "", []interface{}{}, ErrNoStructPointer
+	}
+
+	args := make([]interface{}, 0)
+	var err error
+	query = re.ReplaceAllStringFunc(query, func(src string) string {
+		fv := vv.Elem().FieldByName(src[1:]).Interface()
+		if v, ok := fv.(driver.Valuer); ok {
+			var value driver.Value
+			value, err = v.Value()
+			if err != nil {
+				return "?"
+			}
+			args = append(args, value)
+		} else {
+			args = append(args, fv)
+		}
+		return "?"
+ }) + if err != nil { + return "", []interface{}{}, err + } + return query, args, nil +} + +type cacheStruct struct { + value reflect.Value + idx int +} + +var ( + _ QueryExecuter = &DB{} +) + +// DB is a wrap of sql.DB with extra contents +type DB struct { + *sql.DB + Mapper names.Mapper + reflectCache map[reflect.Type]*cacheStruct + reflectCacheMutex sync.RWMutex + Logger log.ContextLogger + hooks contexts.Hooks +} + +// Open opens a database +func Open(driverName, dataSourceName string) (*DB, error) { + db, err := sql.Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + return &DB{ + DB: db, + Mapper: names.NewCacheMapper(&names.SnakeMapper{}), + reflectCache: make(map[reflect.Type]*cacheStruct), + }, nil +} + +// FromDB creates a DB from a sql.DB +func FromDB(db *sql.DB) *DB { + return &DB{ + DB: db, + Mapper: names.NewCacheMapper(&names.SnakeMapper{}), + reflectCache: make(map[reflect.Type]*cacheStruct), + } +} + +// NeedLogSQL returns true if need to log SQL +func (db *DB) NeedLogSQL(ctx context.Context) bool { + if db.Logger == nil { + return false + } + + v := ctx.Value(log.SessionShowSQLKey) + if showSQL, ok := v.(bool); ok { + return showSQL + } + return db.Logger.IsShowSQL() +} + +func (db *DB) reflectNew(typ reflect.Type) reflect.Value { + db.reflectCacheMutex.Lock() + defer db.reflectCacheMutex.Unlock() + cs, ok := db.reflectCache[typ] + if !ok || cs.idx+1 > DefaultCacheSize-1 { + cs = &cacheStruct{reflect.MakeSlice(reflect.SliceOf(typ), DefaultCacheSize, DefaultCacheSize), 0} + db.reflectCache[typ] = cs + } else { + cs.idx = cs.idx + 1 + } + return cs.value.Index(cs.idx).Addr() +} + +// QueryContext overwrites sql.DB.QueryContext +func (db *DB) QueryContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + hookCtx := contexts.NewContextHook(ctx, query, args) + ctx, err := db.beforeProcess(hookCtx) + if err != nil { + return nil, err + } + rows, err := db.DB.QueryContext(ctx, query, args...) + hookCtx.End(ctx, nil, err) + if err := db.afterProcess(hookCtx); err != nil { + if rows != nil { + rows.Close() + } + return nil, err + } + return &Rows{rows, db}, nil +} + +// Query overwrites sql.DB.Query +func (db *DB) Query(query string, args ...interface{}) (*Rows, error) { + return db.QueryContext(context.Background(), query, args...) +} + +// QueryMapContext executes query with parameters via map and context +func (db *DB) QueryMapContext(ctx context.Context, query string, mp interface{}) (*Rows, error) { + query, args, err := MapToSlice(query, mp) + if err != nil { + return nil, err + } + return db.QueryContext(ctx, query, args...) +} + +// QueryMap executes query with parameters via map +func (db *DB) QueryMap(query string, mp interface{}) (*Rows, error) { + return db.QueryMapContext(context.Background(), query, mp) +} + +func (db *DB) QueryStructContext(ctx context.Context, query string, st interface{}) (*Rows, error) { + query, args, err := StructToSlice(query, st) + if err != nil { + return nil, err + } + return db.QueryContext(ctx, query, args...) +} + +func (db *DB) QueryStruct(query string, st interface{}) (*Rows, error) { + return db.QueryStructContext(context.Background(), query, st) +} + +func (db *DB) QueryRowContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := db.QueryContext(ctx, query, args...) 
+ if err != nil { + return &Row{nil, err} + } + return &Row{rows, nil} +} + +func (db *DB) QueryRow(query string, args ...interface{}) *Row { + return db.QueryRowContext(context.Background(), query, args...) +} + +func (db *DB) QueryRowMapContext(ctx context.Context, query string, mp interface{}) *Row { + query, args, err := MapToSlice(query, mp) + if err != nil { + return &Row{nil, err} + } + return db.QueryRowContext(ctx, query, args...) +} + +func (db *DB) QueryRowMap(query string, mp interface{}) *Row { + return db.QueryRowMapContext(context.Background(), query, mp) +} + +func (db *DB) QueryRowStructContext(ctx context.Context, query string, st interface{}) *Row { + query, args, err := StructToSlice(query, st) + if err != nil { + return &Row{nil, err} + } + return db.QueryRowContext(ctx, query, args...) +} + +func (db *DB) QueryRowStruct(query string, st interface{}) *Row { + return db.QueryRowStructContext(context.Background(), query, st) +} + +var ( + re = regexp.MustCompile(`[?](\w+)`) +) + +// ExecMapContext exec map with context.ContextHook +// insert into (name) values (?) +// insert into (name) values (?name) +func (db *DB) ExecMapContext(ctx context.Context, query string, mp interface{}) (sql.Result, error) { + query, args, err := MapToSlice(query, mp) + if err != nil { + return nil, err + } + return db.ExecContext(ctx, query, args...) +} + +func (db *DB) ExecMap(query string, mp interface{}) (sql.Result, error) { + return db.ExecMapContext(context.Background(), query, mp) +} + +func (db *DB) ExecStructContext(ctx context.Context, query string, st interface{}) (sql.Result, error) { + query, args, err := StructToSlice(query, st) + if err != nil { + return nil, err + } + return db.ExecContext(ctx, query, args...) +} + +func (db *DB) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + hookCtx := contexts.NewContextHook(ctx, query, args) + ctx, err := db.beforeProcess(hookCtx) + if err != nil { + return nil, err + } + res, err := db.DB.ExecContext(ctx, query, args...) + hookCtx.End(ctx, res, err) + if err := db.afterProcess(hookCtx); err != nil { + return nil, err + } + return res, nil +} + +func (db *DB) ExecStruct(query string, st interface{}) (sql.Result, error) { + return db.ExecStructContext(context.Background(), query, st) +} + +func (db *DB) beforeProcess(c *contexts.ContextHook) (context.Context, error) { + if db.NeedLogSQL(c.Ctx) { + db.Logger.BeforeSQL(log.LogContext(*c)) + } + ctx, err := db.hooks.BeforeProcess(c) + if err != nil { + return nil, err + } + return ctx, nil +} + +func (db *DB) afterProcess(c *contexts.ContextHook) error { + err := db.hooks.AfterProcess(c) + if db.NeedLogSQL(c.Ctx) { + db.Logger.AfterSQL(log.LogContext(*c)) + } + return err +} + +func (db *DB) AddHook(h ...contexts.Hook) { + db.hooks.AddHook(h...) +} diff --git a/vendor/xorm.io/xorm/core/error.go b/vendor/xorm.io/xorm/core/error.go new file mode 100644 index 0000000..1fd1834 --- /dev/null +++ b/vendor/xorm.io/xorm/core/error.go @@ -0,0 +1,14 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
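The named-placeholder helpers above are easiest to see end to end: MapToSlice rewrites each ?name to ? and binds the value from the map before the query reaches database/sql. A minimal sketch, assuming some database/sql driver is registered (the MySQL driver import, DSN and table are illustrative, not taken from this patch):

package main

import (
	"fmt"

	_ "github.com/go-sql-driver/mysql" // assumed driver; swap in whichever one the project vendors
	"xorm.io/xorm/core"
)

func main() {
	db, err := core.Open("mysql", "user:pass@/corona") // illustrative DSN
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// ?date is rewritten to ? by MapToSlice and bound from the map.
	rows, err := db.QueryMap(
		"SELECT * FROM coronafana WHERE date = ?date",
		&map[string]interface{}{"date": "2020-09-12"},
	)
	if err != nil {
		panic(err)
	}
	defer rows.Close()

	records, err := rows.ToMapString() // Rows helper defined just below
	fmt.Println(records, err)
}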
+ +package core + +import "errors" + +var ( + // ErrNoMapPointer represents error when no map pointer + ErrNoMapPointer = errors.New("mp should be a map's pointer") + // ErrNoStructPointer represents error when no struct pointer + ErrNoStructPointer = errors.New("mp should be a struct's pointer") +) diff --git a/vendor/xorm.io/xorm/core/interface.go b/vendor/xorm.io/xorm/core/interface.go new file mode 100644 index 0000000..a5c8e4e --- /dev/null +++ b/vendor/xorm.io/xorm/core/interface.go @@ -0,0 +1,22 @@ +package core + +import ( + "context" + "database/sql" +) + +// Queryer represents an interface to query a SQL to get data from database +type Queryer interface { + QueryContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) +} + +// Executer represents an interface to execute a SQL +type Executer interface { + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) +} + +// QueryExecuter combines the Queryer and Executer +type QueryExecuter interface { + Queryer + Executer +} diff --git a/vendor/xorm.io/xorm/core/rows.go b/vendor/xorm.io/xorm/core/rows.go new file mode 100644 index 0000000..a1e8bfb --- /dev/null +++ b/vendor/xorm.io/xorm/core/rows.go @@ -0,0 +1,338 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package core + +import ( + "database/sql" + "errors" + "reflect" + "sync" +) + +type Rows struct { + *sql.Rows + db *DB +} + +func (rs *Rows) ToMapString() ([]map[string]string, error) { + cols, err := rs.Columns() + if err != nil { + return nil, err + } + + var results = make([]map[string]string, 0, 10) + for rs.Next() { + var record = make(map[string]string, len(cols)) + err = rs.ScanMap(&record) + if err != nil { + return nil, err + } + results = append(results, record) + } + return results, nil +} + +// scan data to a struct's pointer according field index +func (rs *Rows) ScanStructByIndex(dest ...interface{}) error { + if len(dest) == 0 { + return errors.New("at least one struct") + } + + vvvs := make([]reflect.Value, len(dest)) + for i, s := range dest { + vv := reflect.ValueOf(s) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { + return errors.New("dest should be a struct's pointer") + } + + vvvs[i] = vv.Elem() + } + + cols, err := rs.Columns() + if err != nil { + return err + } + newDest := make([]interface{}, len(cols)) + + var i = 0 + for _, vvv := range vvvs { + for j := 0; j < vvv.NumField(); j++ { + newDest[i] = vvv.Field(j).Addr().Interface() + i = i + 1 + } + } + + return rs.Rows.Scan(newDest...) 
+} + +var ( + fieldCache = make(map[reflect.Type]map[string]int) + fieldCacheMutex sync.RWMutex +) + +func fieldByName(v reflect.Value, name string) reflect.Value { + t := v.Type() + fieldCacheMutex.RLock() + cache, ok := fieldCache[t] + fieldCacheMutex.RUnlock() + if !ok { + cache = make(map[string]int) + for i := 0; i < v.NumField(); i++ { + cache[t.Field(i).Name] = i + } + fieldCacheMutex.Lock() + fieldCache[t] = cache + fieldCacheMutex.Unlock() + } + + if i, ok := cache[name]; ok { + return v.Field(i) + } + + return reflect.Zero(t) +} + +// scan data to a struct's pointer according field name +func (rs *Rows) ScanStructByName(dest interface{}) error { + vv := reflect.ValueOf(dest) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { + return errors.New("dest should be a struct's pointer") + } + + cols, err := rs.Columns() + if err != nil { + return err + } + + newDest := make([]interface{}, len(cols)) + var v EmptyScanner + for j, name := range cols { + f := fieldByName(vv.Elem(), rs.db.Mapper.Table2Obj(name)) + if f.IsValid() { + newDest[j] = f.Addr().Interface() + } else { + newDest[j] = &v + } + } + + return rs.Rows.Scan(newDest...) +} + +// scan data to a slice's pointer, slice's length should equal to columns' number +func (rs *Rows) ScanSlice(dest interface{}) error { + vv := reflect.ValueOf(dest) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Slice { + return errors.New("dest should be a slice's pointer") + } + + vvv := vv.Elem() + cols, err := rs.Columns() + if err != nil { + return err + } + + newDest := make([]interface{}, len(cols)) + + for j := 0; j < len(cols); j++ { + if j >= vvv.Len() { + newDest[j] = reflect.New(vvv.Type().Elem()).Interface() + } else { + newDest[j] = vvv.Index(j).Addr().Interface() + } + } + + err = rs.Rows.Scan(newDest...) + if err != nil { + return err + } + + srcLen := vvv.Len() + for i := srcLen; i < len(cols); i++ { + vvv = reflect.Append(vvv, reflect.ValueOf(newDest[i]).Elem()) + } + return nil +} + +// scan data to a map's pointer +func (rs *Rows) ScanMap(dest interface{}) error { + vv := reflect.ValueOf(dest) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map { + return errors.New("dest should be a map's pointer") + } + + cols, err := rs.Columns() + if err != nil { + return err + } + + newDest := make([]interface{}, len(cols)) + vvv := vv.Elem() + + for i := range cols { + newDest[i] = rs.db.reflectNew(vvv.Type().Elem()).Interface() + } + + err = rs.Rows.Scan(newDest...) + if err != nil { + return err + } + + for i, name := range cols { + vname := reflect.ValueOf(name) + vvv.SetMapIndex(vname, reflect.ValueOf(newDest[i]).Elem()) + } + + return nil +} + +type Row struct { + rows *Rows + // One of these two will be non-nil: + err error // deferred error for easy chaining +} + +// ErrorRow return an error row +func ErrorRow(err error) *Row { + return &Row{ + err: err, + } +} + +// NewRow from rows +func NewRow(rows *Rows, err error) *Row { + return &Row{rows, err} +} + +func (row *Row) Columns() ([]string, error) { + if row.err != nil { + return nil, row.err + } + return row.rows.Columns() +} + +func (row *Row) Scan(dest ...interface{}) error { + if row.err != nil { + return row.err + } + defer row.rows.Close() + + for _, dp := range dest { + if _, ok := dp.(*sql.RawBytes); ok { + return errors.New("sql: RawBytes isn't allowed on Row.Scan") + } + } + + if !row.rows.Next() { + if err := row.rows.Err(); err != nil { + return err + } + return sql.ErrNoRows + } + err := row.rows.Scan(dest...) 
+ if err != nil { + return err + } + // Make sure the query can be processed to completion with no errors. + return row.rows.Close() +} + +func (row *Row) ScanStructByName(dest interface{}) error { + if row.err != nil { + return row.err + } + defer row.rows.Close() + + if !row.rows.Next() { + if err := row.rows.Err(); err != nil { + return err + } + return sql.ErrNoRows + } + err := row.rows.ScanStructByName(dest) + if err != nil { + return err + } + // Make sure the query can be processed to completion with no errors. + return row.rows.Close() +} + +func (row *Row) ScanStructByIndex(dest interface{}) error { + if row.err != nil { + return row.err + } + defer row.rows.Close() + + if !row.rows.Next() { + if err := row.rows.Err(); err != nil { + return err + } + return sql.ErrNoRows + } + err := row.rows.ScanStructByIndex(dest) + if err != nil { + return err + } + // Make sure the query can be processed to completion with no errors. + return row.rows.Close() +} + +// scan data to a slice's pointer, slice's length should equal to columns' number +func (row *Row) ScanSlice(dest interface{}) error { + if row.err != nil { + return row.err + } + defer row.rows.Close() + + if !row.rows.Next() { + if err := row.rows.Err(); err != nil { + return err + } + return sql.ErrNoRows + } + err := row.rows.ScanSlice(dest) + if err != nil { + return err + } + + // Make sure the query can be processed to completion with no errors. + return row.rows.Close() +} + +// scan data to a map's pointer +func (row *Row) ScanMap(dest interface{}) error { + if row.err != nil { + return row.err + } + defer row.rows.Close() + + if !row.rows.Next() { + if err := row.rows.Err(); err != nil { + return err + } + return sql.ErrNoRows + } + err := row.rows.ScanMap(dest) + if err != nil { + return err + } + + // Make sure the query can be processed to completion with no errors. + return row.rows.Close() +} + +func (row *Row) ToMapString() (map[string]string, error) { + cols, err := row.Columns() + if err != nil { + return nil, err + } + + var record = make(map[string]string, len(cols)) + err = row.ScanMap(&record) + if err != nil { + return nil, err + } + + return record, nil +} diff --git a/vendor/xorm.io/xorm/core/scan.go b/vendor/xorm.io/xorm/core/scan.go new file mode 100644 index 0000000..897b534 --- /dev/null +++ b/vendor/xorm.io/xorm/core/scan.go @@ -0,0 +1,66 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package core + +import ( + "database/sql/driver" + "fmt" + "time" +) + +type NullTime time.Time + +var ( + _ driver.Valuer = NullTime{} +) + +func (ns *NullTime) Scan(value interface{}) error { + if value == nil { + return nil + } + return convertTime(ns, value) +} + +// Value implements the driver Valuer interface. +func (ns NullTime) Value() (driver.Value, error) { + if (time.Time)(ns).IsZero() { + return nil, nil + } + return (time.Time)(ns).Format("2006-01-02 15:04:05"), nil +} + +func convertTime(dest *NullTime, src interface{}) error { + // Common cases, without reflect. 
+	switch s := src.(type) {
+	case string:
+		t, err := time.Parse("2006-01-02 15:04:05", s)
+		if err != nil {
+			return err
+		}
+		*dest = NullTime(t)
+		return nil
+	case []uint8:
+		t, err := time.Parse("2006-01-02 15:04:05", string(s))
+		if err != nil {
+			return err
+		}
+		*dest = NullTime(t)
+		return nil
+	case time.Time:
+		*dest = NullTime(s)
+		return nil
+	case nil:
+	default:
+		return fmt.Errorf("unsupported driver -> Scan pair: %T -> %T", src, dest)
+	}
+	return nil
+}
+
+type EmptyScanner struct {
+}
+
+func (EmptyScanner) Scan(src interface{}) error {
+	return nil
+}
diff --git a/vendor/xorm.io/xorm/core/stmt.go b/vendor/xorm.io/xorm/core/stmt.go
new file mode 100644
index 0000000..d46ac9c
--- /dev/null
+++ b/vendor/xorm.io/xorm/core/stmt.go
@@ -0,0 +1,194 @@
+// Copyright 2019 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package core
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+	"reflect"
+
+	"xorm.io/xorm/contexts"
+)
+
+// Stmt represents a stmt object
+type Stmt struct {
+	*sql.Stmt
+	db    *DB
+	names map[string]int
+	query string
+}
+
+func (db *DB) PrepareContext(ctx context.Context, query string) (*Stmt, error) {
+	names := make(map[string]int)
+	var i int
+	query = re.ReplaceAllStringFunc(query, func(src string) string {
+		names[src[1:]] = i
+		i++
+		return "?"
+	})
+	hookCtx := contexts.NewContextHook(ctx, "PREPARE", nil)
+	ctx, err := db.beforeProcess(hookCtx)
+	if err != nil {
+		return nil, err
+	}
+	stmt, err := db.DB.PrepareContext(ctx, query)
+	hookCtx.End(ctx, nil, err)
+	if err := db.afterProcess(hookCtx); err != nil {
+		return nil, err
+	}
+	return &Stmt{stmt, db, names, query}, nil
+}
+
+func (db *DB) Prepare(query string) (*Stmt, error) {
+	return db.PrepareContext(context.Background(), query)
+}
+
+func (s *Stmt) ExecMapContext(ctx context.Context, mp interface{}) (sql.Result, error) {
+	vv := reflect.ValueOf(mp)
+	if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map {
+		return nil, errors.New("mp should be a map's pointer")
+	}
+
+	args := make([]interface{}, len(s.names))
+	for k, i := range s.names {
+		args[i] = vv.Elem().MapIndex(reflect.ValueOf(k)).Interface()
+	}
+	return s.ExecContext(ctx, args...)
+}
+
+func (s *Stmt) ExecMap(mp interface{}) (sql.Result, error) {
+	return s.ExecMapContext(context.Background(), mp)
+}
+
+func (s *Stmt) ExecStructContext(ctx context.Context, st interface{}) (sql.Result, error) {
+	vv := reflect.ValueOf(st)
+	if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct {
+		return nil, errors.New("st should be a struct's pointer")
+	}
+
+	args := make([]interface{}, len(s.names))
+	for k, i := range s.names {
+		args[i] = vv.Elem().FieldByName(k).Interface()
+	}
+	return s.ExecContext(ctx, args...)
+}
+
+func (s *Stmt) ExecStruct(st interface{}) (sql.Result, error) {
+	return s.ExecStructContext(context.Background(), st)
+}
+
+func (s *Stmt) ExecContext(ctx context.Context, args ...interface{}) (sql.Result, error) {
+	hookCtx := contexts.NewContextHook(ctx, s.query, args)
+	ctx, err := s.db.beforeProcess(hookCtx)
+	if err != nil {
+		return nil, err
+	}
+	res, err := s.Stmt.ExecContext(ctx, args...)
+	hookCtx.End(ctx, res, err)
+	if err := s.db.afterProcess(hookCtx); err != nil {
+		return nil, err
+	}
+	return res, nil
+}
+
+func (s *Stmt) QueryContext(ctx context.Context, args ...interface{}) (*Rows, error) {
+	hookCtx := contexts.NewContextHook(ctx, s.query, args)
+	ctx, err := s.db.beforeProcess(hookCtx)
+	if err != nil {
+		return nil, err
+	}
+	rows, err := s.Stmt.QueryContext(ctx, args...)
+	hookCtx.End(ctx, nil, err)
+	if err := s.db.afterProcess(hookCtx); err != nil {
+		return nil, err
+	}
+	return &Rows{rows, s.db}, nil
+}
+
+func (s *Stmt) Query(args ...interface{}) (*Rows, error) {
+	return s.QueryContext(context.Background(), args...)
+}
+
+func (s *Stmt) QueryMapContext(ctx context.Context, mp interface{}) (*Rows, error) {
+	vv := reflect.ValueOf(mp)
+	if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map {
+		return nil, errors.New("mp should be a map's pointer")
+	}
+
+	args := make([]interface{}, len(s.names))
+	for k, i := range s.names {
+		args[i] = vv.Elem().MapIndex(reflect.ValueOf(k)).Interface()
+	}
+
+	return s.QueryContext(ctx, args...)
+}
+
+func (s *Stmt) QueryMap(mp interface{}) (*Rows, error) {
+	return s.QueryMapContext(context.Background(), mp)
+}
+
+func (s *Stmt) QueryStructContext(ctx context.Context, st interface{}) (*Rows, error) {
+	vv := reflect.ValueOf(st)
+	if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct {
+		return nil, errors.New("st should be a struct's pointer")
+	}
+
+	args := make([]interface{}, len(s.names))
+	for k, i := range s.names {
+		args[i] = vv.Elem().FieldByName(k).Interface()
+	}
+
+	return s.QueryContext(ctx, args...)
+}
+
+func (s *Stmt) QueryStruct(st interface{}) (*Rows, error) {
+	return s.QueryStructContext(context.Background(), st)
+}
+
+func (s *Stmt) QueryRowContext(ctx context.Context, args ...interface{}) *Row {
+	rows, err := s.QueryContext(ctx, args...)
+	return &Row{rows, err}
+}
+
+func (s *Stmt) QueryRow(args ...interface{}) *Row {
+	return s.QueryRowContext(context.Background(), args...)
+}
+
+func (s *Stmt) QueryRowMapContext(ctx context.Context, mp interface{}) *Row {
+	vv := reflect.ValueOf(mp)
+	if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map {
+		return &Row{nil, errors.New("mp should be a map's pointer")}
+	}
+
+	args := make([]interface{}, len(s.names))
+	for k, i := range s.names {
+		args[i] = vv.Elem().MapIndex(reflect.ValueOf(k)).Interface()
+	}
+
+	return s.QueryRowContext(ctx, args...)
+}
+
+func (s *Stmt) QueryRowMap(mp interface{}) *Row {
+	return s.QueryRowMapContext(context.Background(), mp)
+}
+
+func (s *Stmt) QueryRowStructContext(ctx context.Context, st interface{}) *Row {
+	vv := reflect.ValueOf(st)
+	if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct {
+		return &Row{nil, errors.New("st should be a struct's pointer")}
+	}
+
+	args := make([]interface{}, len(s.names))
+	for k, i := range s.names {
+		args[i] = vv.Elem().FieldByName(k).Interface()
+	}
+
+	return s.QueryRowContext(ctx, args...)
+} + +func (s *Stmt) QueryRowStruct(st interface{}) *Row { + return s.QueryRowStructContext(context.Background(), st) +} diff --git a/vendor/xorm.io/xorm/core/tx.go b/vendor/xorm.io/xorm/core/tx.go new file mode 100644 index 0000000..a85a687 --- /dev/null +++ b/vendor/xorm.io/xorm/core/tx.go @@ -0,0 +1,219 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package core + +import ( + "context" + "database/sql" + + "xorm.io/xorm/contexts" +) + +var ( + _ QueryExecuter = &Tx{} +) + +// Tx represents a transaction +type Tx struct { + *sql.Tx + db *DB + ctx context.Context +} + +func (db *DB) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + hookCtx := contexts.NewContextHook(ctx, "BEGIN TRANSACTION", nil) + ctx, err := db.beforeProcess(hookCtx) + if err != nil { + return nil, err + } + tx, err := db.DB.BeginTx(ctx, opts) + hookCtx.End(ctx, nil, err) + if err := db.afterProcess(hookCtx); err != nil { + return nil, err + } + return &Tx{tx, db, ctx}, nil +} + +func (db *DB) Begin() (*Tx, error) { + return db.BeginTx(context.Background(), nil) +} + +func (tx *Tx) Commit() error { + hookCtx := contexts.NewContextHook(tx.ctx, "COMMIT", nil) + ctx, err := tx.db.beforeProcess(hookCtx) + if err != nil { + return err + } + err = tx.Tx.Commit() + hookCtx.End(ctx, nil, err) + if err := tx.db.afterProcess(hookCtx); err != nil { + return err + } + return nil +} + +func (tx *Tx) Rollback() error { + hookCtx := contexts.NewContextHook(tx.ctx, "ROLLBACK", nil) + ctx, err := tx.db.beforeProcess(hookCtx) + if err != nil { + return err + } + err = tx.Tx.Rollback() + hookCtx.End(ctx, nil, err) + if err := tx.db.afterProcess(hookCtx); err != nil { + return err + } + return nil +} + +func (tx *Tx) PrepareContext(ctx context.Context, query string) (*Stmt, error) { + names := make(map[string]int) + var i int + query = re.ReplaceAllStringFunc(query, func(src string) string { + names[src[1:]] = i + i++ + return "?" + }) + hookCtx := contexts.NewContextHook(ctx, "PREPARE", nil) + ctx, err := tx.db.beforeProcess(hookCtx) + if err != nil { + return nil, err + } + stmt, err := tx.Tx.PrepareContext(ctx, query) + hookCtx.End(ctx, nil, err) + if err := tx.db.afterProcess(hookCtx); err != nil { + return nil, err + } + return &Stmt{stmt, tx.db, names, query}, nil +} + +func (tx *Tx) Prepare(query string) (*Stmt, error) { + return tx.PrepareContext(context.Background(), query) +} + +func (tx *Tx) StmtContext(ctx context.Context, stmt *Stmt) *Stmt { + stmt.Stmt = tx.Tx.StmtContext(ctx, stmt.Stmt) + return stmt +} + +func (tx *Tx) Stmt(stmt *Stmt) *Stmt { + return tx.StmtContext(context.Background(), stmt) +} + +func (tx *Tx) ExecMapContext(ctx context.Context, query string, mp interface{}) (sql.Result, error) { + query, args, err := MapToSlice(query, mp) + if err != nil { + return nil, err + } + return tx.ExecContext(ctx, query, args...) +} + +func (tx *Tx) ExecMap(query string, mp interface{}) (sql.Result, error) { + return tx.ExecMapContext(context.Background(), query, mp) +} + +func (tx *Tx) ExecStructContext(ctx context.Context, query string, st interface{}) (sql.Result, error) { + query, args, err := StructToSlice(query, st) + if err != nil { + return nil, err + } + return tx.ExecContext(ctx, query, args...) 
+} + +func (tx *Tx) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + hookCtx := contexts.NewContextHook(ctx, query, args) + ctx, err := tx.db.beforeProcess(hookCtx) + if err != nil { + return nil, err + } + res, err := tx.Tx.ExecContext(ctx, query, args...) + hookCtx.End(ctx, res, err) + if err := tx.db.afterProcess(hookCtx); err != nil { + return nil, err + } + return res, err +} + +func (tx *Tx) ExecStruct(query string, st interface{}) (sql.Result, error) { + return tx.ExecStructContext(context.Background(), query, st) +} + +func (tx *Tx) QueryContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + hookCtx := contexts.NewContextHook(ctx, query, args) + ctx, err := tx.db.beforeProcess(hookCtx) + if err != nil { + return nil, err + } + rows, err := tx.Tx.QueryContext(ctx, query, args...) + hookCtx.End(ctx, nil, err) + if err := tx.db.afterProcess(hookCtx); err != nil { + if rows != nil { + rows.Close() + } + return nil, err + } + return &Rows{rows, tx.db}, nil +} + +func (tx *Tx) Query(query string, args ...interface{}) (*Rows, error) { + return tx.QueryContext(context.Background(), query, args...) +} + +func (tx *Tx) QueryMapContext(ctx context.Context, query string, mp interface{}) (*Rows, error) { + query, args, err := MapToSlice(query, mp) + if err != nil { + return nil, err + } + return tx.QueryContext(ctx, query, args...) +} + +func (tx *Tx) QueryMap(query string, mp interface{}) (*Rows, error) { + return tx.QueryMapContext(context.Background(), query, mp) +} + +func (tx *Tx) QueryStructContext(ctx context.Context, query string, st interface{}) (*Rows, error) { + query, args, err := StructToSlice(query, st) + if err != nil { + return nil, err + } + return tx.QueryContext(ctx, query, args...) +} + +func (tx *Tx) QueryStruct(query string, st interface{}) (*Rows, error) { + return tx.QueryStructContext(context.Background(), query, st) +} + +func (tx *Tx) QueryRowContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := tx.QueryContext(ctx, query, args...) + return &Row{rows, err} +} + +func (tx *Tx) QueryRow(query string, args ...interface{}) *Row { + return tx.QueryRowContext(context.Background(), query, args...) +} + +func (tx *Tx) QueryRowMapContext(ctx context.Context, query string, mp interface{}) *Row { + query, args, err := MapToSlice(query, mp) + if err != nil { + return &Row{nil, err} + } + return tx.QueryRowContext(ctx, query, args...) +} + +func (tx *Tx) QueryRowMap(query string, mp interface{}) *Row { + return tx.QueryRowMapContext(context.Background(), query, mp) +} + +func (tx *Tx) QueryRowStructContext(ctx context.Context, query string, st interface{}) *Row { + query, args, err := StructToSlice(query, st) + if err != nil { + return &Row{nil, err} + } + return tx.QueryRowContext(ctx, query, args...) +} + +func (tx *Tx) QueryRowStruct(query string, st interface{}) *Row { + return tx.QueryRowStructContext(context.Background(), query, st) +} diff --git a/vendor/xorm.io/xorm/dialects/dialect.go b/vendor/xorm.io/xorm/dialects/dialect.go new file mode 100644 index 0000000..dc96f73 --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/dialect.go @@ -0,0 +1,284 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
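Transactions go through the same hook pipeline as the plain DB methods, including synthetic "BEGIN TRANSACTION"/"COMMIT"/"ROLLBACK" hook events. A short sketch under the same assumptions as the earlier example (driver, DSN and table are illustrative):

package main

import (
	"fmt"

	_ "github.com/go-sql-driver/mysql" // assumed driver, as above
	"xorm.io/xorm/core"
)

func main() {
	db, err := core.Open("mysql", "user:pass@/corona") // illustrative DSN
	if err != nil {
		panic(err)
	}
	defer db.Close()

	tx, err := db.Begin() // emits the BEGIN TRANSACTION hook event
	if err != nil {
		panic(err)
	}
	if _, err := tx.ExecMap(
		"INSERT INTO coronafana (date) VALUES (?date)",
		&map[string]interface{}{"date": "2020-09-12"},
	); err != nil {
		_ = tx.Rollback()
		panic(err)
	}
	if err := tx.Commit(); err != nil {
		panic(err)
	}
	fmt.Println("committed")
}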
+ +package dialects + +import ( + "context" + "fmt" + "strings" + "time" + + "xorm.io/xorm/core" + "xorm.io/xorm/schemas" +) + +// URI represents an uri to visit database +type URI struct { + DBType schemas.DBType + Proto string + Host string + Port string + DBName string + User string + Passwd string + Charset string + Laddr string + Raddr string + Timeout time.Duration + Schema string +} + +// SetSchema set schema +func (uri *URI) SetSchema(schema string) { + // hack me + if uri.DBType == schemas.POSTGRES { + uri.Schema = strings.TrimSpace(schema) + } +} + +// Dialect represents a kind of database +type Dialect interface { + Init(*URI) error + URI() *URI + SQLType(*schemas.Column) string + FormatBytes(b []byte) string + + IsReserved(string) bool + Quoter() schemas.Quoter + SetQuotePolicy(quotePolicy QuotePolicy) + + AutoIncrStr() string + + GetIndexes(queryer core.Queryer, ctx context.Context, tableName string) (map[string]*schemas.Index, error) + IndexCheckSQL(tableName, idxName string) (string, []interface{}) + CreateIndexSQL(tableName string, index *schemas.Index) string + DropIndexSQL(tableName string, index *schemas.Index) string + + GetTables(queryer core.Queryer, ctx context.Context) ([]*schemas.Table, error) + IsTableExist(queryer core.Queryer, ctx context.Context, tableName string) (bool, error) + CreateTableSQL(table *schemas.Table, tableName string) ([]string, bool) + DropTableSQL(tableName string) (string, bool) + + GetColumns(queryer core.Queryer, ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) + IsColumnExist(queryer core.Queryer, ctx context.Context, tableName string, colName string) (bool, error) + AddColumnSQL(tableName string, col *schemas.Column) string + ModifyColumnSQL(tableName string, col *schemas.Column) string + + ForUpdateSQL(query string) string + + Filters() []Filter + SetParams(params map[string]string) +} + +// Base represents a basic dialect and all real dialects could embed this struct +type Base struct { + dialect Dialect + uri *URI + quoter schemas.Quoter +} + +func (b *Base) Quoter() schemas.Quoter { + return b.quoter +} + +func (b *Base) Init(dialect Dialect, uri *URI) error { + b.dialect, b.uri = dialect, uri + return nil +} + +func (b *Base) URI() *URI { + return b.uri +} + +func (b *Base) DBType() schemas.DBType { + return b.uri.DBType +} + +func (b *Base) FormatBytes(bs []byte) string { + return fmt.Sprintf("0x%x", bs) +} + +func (db *Base) DropTableSQL(tableName string) (string, bool) { + quote := db.dialect.Quoter().Quote + return fmt.Sprintf("DROP TABLE IF EXISTS %s", quote(tableName)), true +} + +func (db *Base) HasRecords(queryer core.Queryer, ctx context.Context, query string, args ...interface{}) (bool, error) { + rows, err := queryer.QueryContext(ctx, query, args...) + if err != nil { + return false, err + } + defer rows.Close() + + if rows.Next() { + return true, nil + } + return false, nil +} + +func (db *Base) IsColumnExist(queryer core.Queryer, ctx context.Context, tableName, colName string) (bool, error) { + quote := db.dialect.Quoter().Quote + query := fmt.Sprintf( + "SELECT %v FROM %v.%v WHERE %v = ? AND %v = ? 
AND %v = ?", + quote("COLUMN_NAME"), + quote("INFORMATION_SCHEMA"), + quote("COLUMNS"), + quote("TABLE_SCHEMA"), + quote("TABLE_NAME"), + quote("COLUMN_NAME"), + ) + return db.HasRecords(queryer, ctx, query, db.uri.DBName, tableName, colName) +} + +func (db *Base) AddColumnSQL(tableName string, col *schemas.Column) string { + s, _ := ColumnString(db.dialect, col, true) + return fmt.Sprintf("ALTER TABLE %v ADD %v", db.dialect.Quoter().Quote(tableName), s) +} + +func (db *Base) CreateIndexSQL(tableName string, index *schemas.Index) string { + quoter := db.dialect.Quoter() + var unique string + var idxName string + if index.Type == schemas.UniqueType { + unique = " UNIQUE" + } + idxName = index.XName(tableName) + return fmt.Sprintf("CREATE%s INDEX %v ON %v (%v)", unique, + quoter.Quote(idxName), quoter.Quote(tableName), + quoter.Join(index.Cols, ",")) +} + +func (db *Base) DropIndexSQL(tableName string, index *schemas.Index) string { + quote := db.dialect.Quoter().Quote + var name string + if index.IsRegular { + name = index.XName(tableName) + } else { + name = index.Name + } + return fmt.Sprintf("DROP INDEX %v ON %s", quote(name), quote(tableName)) +} + +func (db *Base) ModifyColumnSQL(tableName string, col *schemas.Column) string { + s, _ := ColumnString(db.dialect, col, false) + return fmt.Sprintf("alter table %s MODIFY COLUMN %s", tableName, s) +} + +func (b *Base) ForUpdateSQL(query string) string { + return query + " FOR UPDATE" +} + +func (b *Base) SetParams(params map[string]string) { +} + +var ( + dialects = map[string]func() Dialect{} +) + +// RegisterDialect register database dialect +func RegisterDialect(dbName schemas.DBType, dialectFunc func() Dialect) { + if dialectFunc == nil { + panic("core: Register dialect is nil") + } + dialects[strings.ToLower(string(dbName))] = dialectFunc // !nashtsai! allow override dialect +} + +// QueryDialect query if registered database dialect +func QueryDialect(dbName schemas.DBType) Dialect { + if d, ok := dialects[strings.ToLower(string(dbName))]; ok { + return d() + } + return nil +} + +func regDrvsNDialects() bool { + providedDrvsNDialects := map[string]struct { + dbType schemas.DBType + getDriver func() Driver + getDialect func() Dialect + }{ + "mssql": {"mssql", func() Driver { return &odbcDriver{} }, func() Dialect { return &mssql{} }}, + "odbc": {"mssql", func() Driver { return &odbcDriver{} }, func() Dialect { return &mssql{} }}, // !nashtsai! 
TODO change this when supporting MS Access + "mysql": {"mysql", func() Driver { return &mysqlDriver{} }, func() Dialect { return &mysql{} }}, + "mymysql": {"mysql", func() Driver { return &mymysqlDriver{} }, func() Dialect { return &mysql{} }}, + "postgres": {"postgres", func() Driver { return &pqDriver{} }, func() Dialect { return &postgres{} }}, + "pgx": {"postgres", func() Driver { return &pqDriverPgx{} }, func() Dialect { return &postgres{} }}, + "sqlite3": {"sqlite3", func() Driver { return &sqlite3Driver{} }, func() Dialect { return &sqlite3{} }}, + "oci8": {"oracle", func() Driver { return &oci8Driver{} }, func() Dialect { return &oracle{} }}, + "goracle": {"oracle", func() Driver { return &goracleDriver{} }, func() Dialect { return &oracle{} }}, + } + + for driverName, v := range providedDrvsNDialects { + if driver := QueryDriver(driverName); driver == nil { + RegisterDriver(driverName, v.getDriver()) + RegisterDialect(v.dbType, v.getDialect) + } + } + return true +} + +func init() { + regDrvsNDialects() +} + +// ColumnString generate column description string according dialect +func ColumnString(dialect Dialect, col *schemas.Column, includePrimaryKey bool) (string, error) { + bd := strings.Builder{} + + if err := dialect.Quoter().QuoteTo(&bd, col.Name); err != nil { + return "", err + } + + if err := bd.WriteByte(' '); err != nil { + return "", err + } + + if _, err := bd.WriteString(dialect.SQLType(col)); err != nil { + return "", err + } + + if err := bd.WriteByte(' '); err != nil { + return "", err + } + + if includePrimaryKey && col.IsPrimaryKey { + if _, err := bd.WriteString("PRIMARY KEY "); err != nil { + return "", err + } + + if col.IsAutoIncrement { + if _, err := bd.WriteString(dialect.AutoIncrStr()); err != nil { + return "", err + } + if err := bd.WriteByte(' '); err != nil { + return "", err + } + } + } + + if col.Default != "" { + if _, err := bd.WriteString("DEFAULT "); err != nil { + return "", err + } + if _, err := bd.WriteString(col.Default); err != nil { + return "", err + } + if err := bd.WriteByte(' '); err != nil { + return "", err + } + } + + if col.Nullable { + if _, err := bd.WriteString("NULL "); err != nil { + return "", err + } + } else { + if _, err := bd.WriteString("NOT NULL "); err != nil { + return "", err + } + } + + return bd.String(), nil +} diff --git a/vendor/xorm.io/xorm/dialects/driver.go b/vendor/xorm.io/xorm/dialects/driver.go new file mode 100644 index 0000000..ae3afe4 --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/driver.go @@ -0,0 +1,57 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
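regDrvsNDialects above seeds the registry by pairing each driver name with a dialect constructor, and RegisterDialect deliberately allows a later registration to override an earlier one. The register-then-look-up pattern in isolation, as a hypothetical standalone sketch (not the xorm API):

package registry

import "strings"

type dialect interface{ Name() string }

// Factories rather than instances, matching the func() Dialect constructors
// above: every lookup hands out a fresh dialect value.
var factories = map[string]func() dialect{}

func registerDialect(name string, f func() dialect) {
	if f == nil {
		panic("registry: nil dialect factory")
	}
	// Later registrations override earlier ones, as in RegisterDialect above.
	factories[strings.ToLower(name)] = f
}

func queryDialect(name string) dialect {
	if f, ok := factories[strings.ToLower(name)]; ok {
		return f()
	}
	return nil
}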
+
+package dialects
+
+import (
+	"fmt"
+)
+
+type Driver interface {
+	Parse(string, string) (*URI, error)
+}
+
+var (
+	drivers = map[string]Driver{}
+)
+
+func RegisterDriver(driverName string, driver Driver) {
+	if driver == nil {
+		panic("core: Register driver is nil")
+	}
+	if _, dup := drivers[driverName]; dup {
+		panic("core: Register called twice for driver " + driverName)
+	}
+	drivers[driverName] = driver
+}
+
+func QueryDriver(driverName string) Driver {
+	return drivers[driverName]
+}
+
+func RegisteredDriverSize() int {
+	return len(drivers)
+}
+
+// OpenDialect opens a dialect via driver name and connection string
+func OpenDialect(driverName, connstr string) (Dialect, error) {
+	driver := QueryDriver(driverName)
+	if driver == nil {
+		return nil, fmt.Errorf("Unsupported driver name: %v", driverName)
+	}
+
+	uri, err := driver.Parse(driverName, connstr)
+	if err != nil {
+		return nil, err
+	}
+
+	dialect := QueryDialect(uri.DBType)
+	if dialect == nil {
+		return nil, fmt.Errorf("Unsupported dialect type: %v", uri.DBType)
+	}
+
+	dialect.Init(uri)
+
+	return dialect, nil
+}
diff --git a/vendor/xorm.io/xorm/dialects/filter.go b/vendor/xorm.io/xorm/dialects/filter.go
new file mode 100644
index 0000000..6968b6c
--- /dev/null
+++ b/vendor/xorm.io/xorm/dialects/filter.go
@@ -0,0 +1,43 @@
+// Copyright 2019 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dialects
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Filter is an interface to filter SQL
+type Filter interface {
+	Do(sql string) string
+}
+
+// SeqFilter rewrites the placeholders ?, ? ... in SQL into prefixed ordinals such as $1, $2 ...
+type SeqFilter struct {
+	Prefix string
+	Start  int
+}
+
+func convertQuestionMark(sql, prefix string, start int) string {
+	var buf strings.Builder
+	var beginSingleQuote bool
+	var index = start
+	for _, c := range sql {
+		if !beginSingleQuote && c == '?' {
+			buf.WriteString(fmt.Sprintf("%s%v", prefix, index))
+			index++
+		} else {
+			if c == '\'' {
+				beginSingleQuote = !beginSingleQuote
+			}
+			buf.WriteRune(c)
+		}
+	}
+	return buf.String()
+}
+
+func (s *SeqFilter) Do(sql string) string {
+	return convertQuestionMark(sql, s.Prefix, s.Start)
+}
diff --git a/vendor/xorm.io/xorm/dialects/gen_reserved.sh b/vendor/xorm.io/xorm/dialects/gen_reserved.sh
new file mode 100644
index 0000000..434a1bf
--- /dev/null
+++ b/vendor/xorm.io/xorm/dialects/gen_reserved.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+if [ -f "$1" ]; then
+	cat "$1" | awk '{printf("\""$1"\":true,\n")}'
+else
+	echo "argument $1 is not a file!"
+fi
diff --git a/vendor/xorm.io/xorm/dialects/mssql.go b/vendor/xorm.io/xorm/dialects/mssql.go
new file mode 100644
index 0000000..d76a8c6
--- /dev/null
+++ b/vendor/xorm.io/xorm/dialects/mssql.go
@@ -0,0 +1,596 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
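SeqFilter in filter.go above is what lets dialects such as Oracle and PostgreSQL turn ? placeholders into numbered ones, while the beginSingleQuote flag protects quoted literals. A small usage sketch against the exported API shown above (the SQL text is illustrative):

package main

import (
	"fmt"

	"xorm.io/xorm/dialects"
)

func main() {
	f := &dialects.SeqFilter{Prefix: ":", Start: 1}
	// The ? inside the single-quoted literal is left untouched.
	fmt.Println(f.Do("SELECT * FROM t WHERE a = ? AND b = '?' AND c = ?"))
	// Output: SELECT * FROM t WHERE a = :1 AND b = '?' AND c = :2
}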
+ +package dialects + +import ( + "context" + "errors" + "fmt" + "net/url" + "strconv" + "strings" + + "xorm.io/xorm/core" + "xorm.io/xorm/schemas" +) + +var ( + mssqlReservedWords = map[string]bool{ + "ADD": true, + "EXTERNAL": true, + "PROCEDURE": true, + "ALL": true, + "FETCH": true, + "PUBLIC": true, + "ALTER": true, + "FILE": true, + "RAISERROR": true, + "AND": true, + "FILLFACTOR": true, + "READ": true, + "ANY": true, + "FOR": true, + "READTEXT": true, + "AS": true, + "FOREIGN": true, + "RECONFIGURE": true, + "ASC": true, + "FREETEXT": true, + "REFERENCES": true, + "AUTHORIZATION": true, + "FREETEXTTABLE": true, + "REPLICATION": true, + "BACKUP": true, + "FROM": true, + "RESTORE": true, + "BEGIN": true, + "FULL": true, + "RESTRICT": true, + "BETWEEN": true, + "FUNCTION": true, + "RETURN": true, + "BREAK": true, + "GOTO": true, + "REVERT": true, + "BROWSE": true, + "GRANT": true, + "REVOKE": true, + "BULK": true, + "GROUP": true, + "RIGHT": true, + "BY": true, + "HAVING": true, + "ROLLBACK": true, + "CASCADE": true, + "HOLDLOCK": true, + "ROWCOUNT": true, + "CASE": true, + "IDENTITY": true, + "ROWGUIDCOL": true, + "CHECK": true, + "IDENTITY_INSERT": true, + "RULE": true, + "CHECKPOINT": true, + "IDENTITYCOL": true, + "SAVE": true, + "CLOSE": true, + "IF": true, + "SCHEMA": true, + "CLUSTERED": true, + "IN": true, + "SECURITYAUDIT": true, + "COALESCE": true, + "INDEX": true, + "SELECT": true, + "COLLATE": true, + "INNER": true, + "SEMANTICKEYPHRASETABLE": true, + "COLUMN": true, + "INSERT": true, + "SEMANTICSIMILARITYDETAILSTABLE": true, + "COMMIT": true, + "INTERSECT": true, + "SEMANTICSIMILARITYTABLE": true, + "COMPUTE": true, + "INTO": true, + "SESSION_USER": true, + "CONSTRAINT": true, + "IS": true, + "SET": true, + "CONTAINS": true, + "JOIN": true, + "SETUSER": true, + "CONTAINSTABLE": true, + "KEY": true, + "SHUTDOWN": true, + "CONTINUE": true, + "KILL": true, + "SOME": true, + "CONVERT": true, + "LEFT": true, + "STATISTICS": true, + "CREATE": true, + "LIKE": true, + "SYSTEM_USER": true, + "CROSS": true, + "LINENO": true, + "TABLE": true, + "CURRENT": true, + "LOAD": true, + "TABLESAMPLE": true, + "CURRENT_DATE": true, + "MERGE": true, + "TEXTSIZE": true, + "CURRENT_TIME": true, + "NATIONAL": true, + "THEN": true, + "CURRENT_TIMESTAMP": true, + "NOCHECK": true, + "TO": true, + "CURRENT_USER": true, + "NONCLUSTERED": true, + "TOP": true, + "CURSOR": true, + "NOT": true, + "TRAN": true, + "DATABASE": true, + "NULL": true, + "TRANSACTION": true, + "DBCC": true, + "NULLIF": true, + "TRIGGER": true, + "DEALLOCATE": true, + "OF": true, + "TRUNCATE": true, + "DECLARE": true, + "OFF": true, + "TRY_CONVERT": true, + "DEFAULT": true, + "OFFSETS": true, + "TSEQUAL": true, + "DELETE": true, + "ON": true, + "UNION": true, + "DENY": true, + "OPEN": true, + "UNIQUE": true, + "DESC": true, + "OPENDATASOURCE": true, + "UNPIVOT": true, + "DISK": true, + "OPENQUERY": true, + "UPDATE": true, + "DISTINCT": true, + "OPENROWSET": true, + "UPDATETEXT": true, + "DISTRIBUTED": true, + "OPENXML": true, + "USE": true, + "DOUBLE": true, + "OPTION": true, + "USER": true, + "DROP": true, + "OR": true, + "VALUES": true, + "DUMP": true, + "ORDER": true, + "VARYING": true, + "ELSE": true, + "OUTER": true, + "VIEW": true, + "END": true, + "OVER": true, + "WAITFOR": true, + "ERRLVL": true, + "PERCENT": true, + "WHEN": true, + "ESCAPE": true, + "PIVOT": true, + "WHERE": true, + "EXCEPT": true, + "PLAN": true, + "WHILE": true, + "EXEC": true, + "PRECISION": true, + "WITH": true, + "EXECUTE": true, + "PRIMARY": true, 
+ "WITHIN": true, + "EXISTS": true, + "PRINT": true, + "WRITETEXT": true, + "EXIT": true, + "PROC": true, + } + + mssqlQuoter = schemas.Quoter{ + Prefix: '[', + Suffix: ']', + IsReserved: schemas.AlwaysReserve, + } +) + +type mssql struct { + Base + defaultVarchar string + defaultChar string +} + +func (db *mssql) Init(uri *URI) error { + db.quoter = mssqlQuoter + return db.Base.Init(db, uri) +} + +func (db *mssql) SetParams(params map[string]string) { + defaultVarchar, ok := params["DEFAULT_VARCHAR"] + if ok { + var t = strings.ToUpper(defaultVarchar) + switch t { + case "NVARCHAR", "VARCHAR": + db.defaultVarchar = defaultVarchar + default: + db.defaultVarchar = "VARCHAR" + } + } else { + db.defaultVarchar = "VARCHAR" + } + + defaultChar, ok := params["DEFAULT_CHAR"] + if ok { + var t = strings.ToUpper(defaultChar) + switch t { + case "NCHAR", "CHAR": + db.defaultChar = defaultChar + default: + db.defaultChar = "CHAR" + } + } else { + db.defaultChar = "CHAR" + } +} + +func (db *mssql) SQLType(c *schemas.Column) string { + var res string + switch t := c.SQLType.Name; t { + case schemas.Bool: + res = schemas.Bit + if strings.EqualFold(c.Default, "true") { + c.Default = "1" + } else if strings.EqualFold(c.Default, "false") { + c.Default = "0" + } + return res + case schemas.Serial: + c.IsAutoIncrement = true + c.IsPrimaryKey = true + c.Nullable = false + res = schemas.Int + case schemas.BigSerial: + c.IsAutoIncrement = true + c.IsPrimaryKey = true + c.Nullable = false + res = schemas.BigInt + case schemas.Bytea, schemas.Blob, schemas.Binary, schemas.TinyBlob, schemas.MediumBlob, schemas.LongBlob: + res = schemas.VarBinary + if c.Length == 0 { + c.Length = 50 + } + case schemas.TimeStamp: + res = schemas.DateTime + case schemas.TimeStampz: + res = "DATETIMEOFFSET" + c.Length = 7 + case schemas.MediumInt: + res = schemas.Int + case schemas.Text, schemas.MediumText, schemas.TinyText, schemas.LongText, schemas.Json: + res = schemas.Varchar + "(MAX)" + case schemas.Double: + res = schemas.Real + case schemas.Uuid: + res = schemas.Varchar + c.Length = 40 + case schemas.TinyInt: + res = schemas.TinyInt + c.Length = 0 + case schemas.BigInt: + res = schemas.BigInt + c.Length = 0 + case schemas.Varchar: + res = db.defaultVarchar + case schemas.Char: + res = db.defaultChar + default: + res = t + } + + if res == schemas.Int || res == schemas.Bit || res == schemas.DateTime { + return res + } + + hasLen1 := (c.Length > 0) + hasLen2 := (c.Length2 > 0) + + if hasLen2 { + res += "(" + strconv.Itoa(c.Length) + "," + strconv.Itoa(c.Length2) + ")" + } else if hasLen1 { + res += "(" + strconv.Itoa(c.Length) + ")" + } + return res +} + +func (db *mssql) IsReserved(name string) bool { + _, ok := mssqlReservedWords[strings.ToUpper(name)] + return ok +} + +func (db *mssql) SetQuotePolicy(quotePolicy QuotePolicy) { + switch quotePolicy { + case QuotePolicyNone: + var q = mssqlQuoter + q.IsReserved = schemas.AlwaysNoReserve + db.quoter = q + case QuotePolicyReserved: + var q = mssqlQuoter + q.IsReserved = db.IsReserved + db.quoter = q + case QuotePolicyAlways: + fallthrough + default: + db.quoter = mssqlQuoter + } +} + +func (db *mssql) AutoIncrStr() string { + return "IDENTITY" +} + +func (db *mssql) DropTableSQL(tableName string) (string, bool) { + return fmt.Sprintf("IF EXISTS (SELECT * FROM sysobjects WHERE id = "+ + "object_id(N'%s') and OBJECTPROPERTY(id, N'IsUserTable') = 1) "+ + "DROP TABLE \"%s\"", tableName, tableName), true +} + +func (db *mssql) IndexCheckSQL(tableName, idxName string) (string, 
[]interface{}) { + args := []interface{}{idxName} + sql := "select name from sysindexes where id=object_id('" + tableName + "') and name=?" + return sql, args +} + +func (db *mssql) IsColumnExist(queryer core.Queryer, ctx context.Context, tableName, colName string) (bool, error) { + query := `SELECT "COLUMN_NAME" FROM "INFORMATION_SCHEMA"."COLUMNS" WHERE "TABLE_NAME" = ? AND "COLUMN_NAME" = ?` + + return db.HasRecords(queryer, ctx, query, tableName, colName) +} + +func (db *mssql) IsTableExist(queryer core.Queryer, ctx context.Context, tableName string) (bool, error) { + sql := "select * from sysobjects where id = object_id(N'" + tableName + "') and OBJECTPROPERTY(id, N'IsUserTable') = 1" + return db.HasRecords(queryer, ctx, sql) +} + +func (db *mssql) GetColumns(queryer core.Queryer, ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) { + args := []interface{}{} + s := `select a.name as name, b.name as ctype,a.max_length,a.precision,a.scale,a.is_nullable as nullable, + "default_is_null" = (CASE WHEN c.text is null THEN 1 ELSE 0 END), + replace(replace(isnull(c.text,''),'(',''),')','') as vdefault, + ISNULL(p.is_primary_key, 0), a.is_identity as is_identity + from sys.columns a + left join sys.types b on a.user_type_id=b.user_type_id + left join sys.syscomments c on a.default_object_id=c.id + LEFT OUTER JOIN (SELECT i.object_id, ic.column_id, i.is_primary_key + FROM sys.indexes i + LEFT JOIN sys.index_columns ic ON ic.object_id = i.object_id AND ic.index_id = i.index_id + WHERE i.is_primary_key = 1 + ) as p on p.object_id = a.object_id AND p.column_id = a.column_id + where a.object_id=object_id('` + tableName + `')` + + rows, err := queryer.QueryContext(ctx, s, args...) + if err != nil { + return nil, nil, err + } + defer rows.Close() + + cols := make(map[string]*schemas.Column) + colSeq := make([]string, 0) + for rows.Next() { + var name, ctype, vdefault string + var maxLen, precision, scale int + var nullable, isPK, defaultIsNull, isIncrement bool + err = rows.Scan(&name, &ctype, &maxLen, &precision, &scale, &nullable, &defaultIsNull, &vdefault, &isPK, &isIncrement) + if err != nil { + return nil, nil, err + } + + col := new(schemas.Column) + col.Indexes = make(map[string]int) + col.Name = strings.Trim(name, "` ") + col.Nullable = nullable + col.DefaultIsEmpty = defaultIsNull + if !defaultIsNull { + col.Default = vdefault + } + col.IsPrimaryKey = isPK + col.IsAutoIncrement = isIncrement + ct := strings.ToUpper(ctype) + if ct == "DECIMAL" { + col.Length = precision + col.Length2 = scale + } else { + col.Length = maxLen + } + switch ct { + case "DATETIMEOFFSET": + col.SQLType = schemas.SQLType{Name: schemas.TimeStampz, DefaultLength: 0, DefaultLength2: 0} + case "NVARCHAR": + col.SQLType = schemas.SQLType{Name: schemas.NVarchar, DefaultLength: 0, DefaultLength2: 0} + case "IMAGE": + col.SQLType = schemas.SQLType{Name: schemas.VarBinary, DefaultLength: 0, DefaultLength2: 0} + default: + if _, ok := schemas.SqlTypes[ct]; ok { + col.SQLType = schemas.SQLType{Name: ct, DefaultLength: 0, DefaultLength2: 0} + } else { + return nil, nil, fmt.Errorf("Unknown colType %v for %v - %v", ct, tableName, col.Name) + } + } + + cols[col.Name] = col + colSeq = append(colSeq, col.Name) + } + return colSeq, cols, nil +} + +func (db *mssql) GetTables(queryer core.Queryer, ctx context.Context) ([]*schemas.Table, error) { + args := []interface{}{} + s := `select name from sysobjects where xtype ='U'` + + rows, err := queryer.QueryContext(ctx, s, args...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + + tables := make([]*schemas.Table, 0) + for rows.Next() { + table := schemas.NewEmptyTable() + var name string + err = rows.Scan(&name) + if err != nil { + return nil, err + } + table.Name = strings.Trim(name, "` ") + tables = append(tables, table) + } + return tables, nil +} + +func (db *mssql) GetIndexes(queryer core.Queryer, ctx context.Context, tableName string) (map[string]*schemas.Index, error) { + args := []interface{}{tableName} + s := `SELECT +IXS.NAME AS [INDEX_NAME], +C.NAME AS [COLUMN_NAME], +IXS.is_unique AS [IS_UNIQUE] +FROM SYS.INDEXES IXS +INNER JOIN SYS.INDEX_COLUMNS IXCS +ON IXS.OBJECT_ID=IXCS.OBJECT_ID AND IXS.INDEX_ID = IXCS.INDEX_ID +INNER JOIN SYS.COLUMNS C ON IXS.OBJECT_ID=C.OBJECT_ID +AND IXCS.COLUMN_ID=C.COLUMN_ID +WHERE IXS.TYPE_DESC='NONCLUSTERED' and OBJECT_NAME(IXS.OBJECT_ID) =? +` + + rows, err := queryer.QueryContext(ctx, s, args...) + if err != nil { + return nil, err + } + defer rows.Close() + + indexes := make(map[string]*schemas.Index, 0) + for rows.Next() { + var indexType int + var indexName, colName, isUnique string + + err = rows.Scan(&indexName, &colName, &isUnique) + if err != nil { + return nil, err + } + + i, err := strconv.ParseBool(isUnique) + if err != nil { + return nil, err + } + + if i { + indexType = schemas.UniqueType + } else { + indexType = schemas.IndexType + } + + colName = strings.Trim(colName, "` ") + var isRegular bool + if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) { + indexName = indexName[5+len(tableName):] + isRegular = true + } + + var index *schemas.Index + var ok bool + if index, ok = indexes[indexName]; !ok { + index = new(schemas.Index) + index.Type = indexType + index.Name = indexName + index.IsRegular = isRegular + indexes[indexName] = index + } + index.AddColumn(colName) + } + return indexes, nil +} + +func (db *mssql) CreateTableSQL(table *schemas.Table, tableName string) ([]string, bool) { + var sql string + if tableName == "" { + tableName = table.Name + } + + sql = "IF NOT EXISTS (SELECT [name] FROM sys.tables WHERE [name] = '" + tableName + "' ) CREATE TABLE " + + sql += db.Quoter().Quote(tableName) + " (" + + pkList := table.PrimaryKeys + + for _, colName := range table.ColumnsSeq() { + col := table.GetColumn(colName) + s, _ := ColumnString(db, col, col.IsPrimaryKey && len(pkList) == 1) + sql += s + sql = strings.TrimSpace(sql) + sql += ", " + } + + if len(pkList) > 1 { + sql += "PRIMARY KEY ( " + sql += strings.Join(pkList, ",") + sql += " ), " + } + + sql = sql[:len(sql)-2] + ")" + sql += ";" + return []string{sql}, true +} + +func (db *mssql) ForUpdateSQL(query string) string { + return query +} + +func (db *mssql) Filters() []Filter { + return []Filter{} +} + +type odbcDriver struct { +} + +func (p *odbcDriver) Parse(driverName, dataSourceName string) (*URI, error) { + var dbName string + + if strings.HasPrefix(dataSourceName, "sqlserver://") { + u, err := url.Parse(dataSourceName) + if err != nil { + return nil, err + } + dbName = u.Query().Get("database") + } else { + kv := strings.Split(dataSourceName, ";") + for _, c := range kv { + vv := strings.Split(strings.TrimSpace(c), "=") + if len(vv) == 2 { + switch strings.ToLower(vv[0]) { + case "database": + dbName = vv[1] + } + } + } + } + if dbName == "" { + return nil, errors.New("no db name provided") + } + return &URI{DBName: dbName, DBType: schemas.MSSQL}, nil +} diff --git a/vendor/xorm.io/xorm/dialects/mysql.go 
b/vendor/xorm.io/xorm/dialects/mysql.go new file mode 100644 index 0000000..32e18a1 --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/mysql.go @@ -0,0 +1,671 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dialects + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "regexp" + "strconv" + "strings" + "time" + + "xorm.io/xorm/core" + "xorm.io/xorm/schemas" +) + +var ( + mysqlReservedWords = map[string]bool{ + "ADD": true, + "ALL": true, + "ALTER": true, + "ANALYZE": true, + "AND": true, + "AS": true, + "ASC": true, + "ASENSITIVE": true, + "BEFORE": true, + "BETWEEN": true, + "BIGINT": true, + "BINARY": true, + "BLOB": true, + "BOTH": true, + "BY": true, + "CALL": true, + "CASCADE": true, + "CASE": true, + "CHANGE": true, + "CHAR": true, + "CHARACTER": true, + "CHECK": true, + "COLLATE": true, + "COLUMN": true, + "CONDITION": true, + "CONNECTION": true, + "CONSTRAINT": true, + "CONTINUE": true, + "CONVERT": true, + "CREATE": true, + "CROSS": true, + "CURRENT_DATE": true, + "CURRENT_TIME": true, + "CURRENT_TIMESTAMP": true, + "CURRENT_USER": true, + "CURSOR": true, + "DATABASE": true, + "DATABASES": true, + "DAY_HOUR": true, + "DAY_MICROSECOND": true, + "DAY_MINUTE": true, + "DAY_SECOND": true, + "DEC": true, + "DECIMAL": true, + "DECLARE": true, + "DEFAULT": true, + "DELAYED": true, + "DELETE": true, + "DESC": true, + "DESCRIBE": true, + "DETERMINISTIC": true, + "DISTINCT": true, + "DISTINCTROW": true, + "DIV": true, + "DOUBLE": true, + "DROP": true, + "DUAL": true, + "EACH": true, + "ELSE": true, + "ELSEIF": true, + "ENCLOSED": true, + "ESCAPED": true, + "EXISTS": true, + "EXIT": true, + "EXPLAIN": true, + "FALSE": true, + "FETCH": true, + "FLOAT": true, + "FLOAT4": true, + "FLOAT8": true, + "FOR": true, + "FORCE": true, + "FOREIGN": true, + "FROM": true, + "FULLTEXT": true, + "GOTO": true, + "GRANT": true, + "GROUP": true, + "HAVING": true, + "HIGH_PRIORITY": true, + "HOUR_MICROSECOND": true, + "HOUR_MINUTE": true, + "HOUR_SECOND": true, + "IF": true, + "IGNORE": true, + "IN": true, "INDEX": true, + "INFILE": true, "INNER": true, "INOUT": true, + "INSENSITIVE": true, "INSERT": true, "INT": true, + "INT1": true, "INT2": true, "INT3": true, + "INT4": true, "INT8": true, "INTEGER": true, + "INTERVAL": true, "INTO": true, "IS": true, + "ITERATE": true, "JOIN": true, "KEY": true, + "KEYS": true, "KILL": true, "LABEL": true, + "LEADING": true, "LEAVE": true, "LEFT": true, + "LIKE": true, "LIMIT": true, "LINEAR": true, + "LINES": true, "LOAD": true, "LOCALTIME": true, + "LOCALTIMESTAMP": true, "LOCK": true, "LONG": true, + "LONGBLOB": true, "LONGTEXT": true, "LOOP": true, + "LOW_PRIORITY": true, "MATCH": true, "MEDIUMBLOB": true, + "MEDIUMINT": true, "MEDIUMTEXT": true, "MIDDLEINT": true, + "MINUTE_MICROSECOND": true, "MINUTE_SECOND": true, "MOD": true, + "MODIFIES": true, "NATURAL": true, "NOT": true, + "NO_WRITE_TO_BINLOG": true, "NULL": true, "NUMERIC": true, + "ON OPTIMIZE": true, "OPTION": true, + "OPTIONALLY": true, "OR": true, "ORDER": true, + "OUT": true, "OUTER": true, "OUTFILE": true, + "PRECISION": true, "PRIMARY": true, "PROCEDURE": true, + "PURGE": true, "RAID0": true, "RANGE": true, + "READ": true, "READS": true, "REAL": true, + "REFERENCES": true, "REGEXP": true, "RELEASE": true, + "RENAME": true, "REPEAT": true, "REPLACE": true, + "REQUIRE": true, "RESTRICT": true, "RETURN": true, + "REVOKE": true, "RIGHT": true, "RLIKE": true, + "SCHEMA": true, 
"SCHEMAS": true, "SECOND_MICROSECOND": true, + "SELECT": true, "SENSITIVE": true, "SEPARATOR": true, + "SET": true, "SHOW": true, "SMALLINT": true, + "SPATIAL": true, "SPECIFIC": true, "SQL": true, + "SQLEXCEPTION": true, "SQLSTATE": true, "SQLWARNING": true, + "SQL_BIG_RESULT": true, "SQL_CALC_FOUND_ROWS": true, "SQL_SMALL_RESULT": true, + "SSL": true, "STARTING": true, "STRAIGHT_JOIN": true, + "TABLE": true, "TERMINATED": true, "THEN": true, + "TINYBLOB": true, "TINYINT": true, "TINYTEXT": true, + "TO": true, "TRAILING": true, "TRIGGER": true, + "TRUE": true, "UNDO": true, "UNION": true, + "UNIQUE": true, "UNLOCK": true, "UNSIGNED": true, + "UPDATE": true, "USAGE": true, "USE": true, + "USING": true, "UTC_DATE": true, "UTC_TIME": true, + "UTC_TIMESTAMP": true, "VALUES": true, "VARBINARY": true, + "VARCHAR": true, + "VARCHARACTER": true, + "VARYING": true, + "WHEN": true, + "WHERE": true, + "WHILE": true, + "WITH": true, + "WRITE": true, + "X509": true, + "XOR": true, + "YEAR_MONTH": true, + "ZEROFILL": true, + } + + mysqlQuoter = schemas.Quoter{ + Prefix: '`', + Suffix: '`', + IsReserved: schemas.AlwaysReserve, + } +) + +type mysql struct { + Base + net string + addr string + params map[string]string + loc *time.Location + timeout time.Duration + tls *tls.Config + allowAllFiles bool + allowOldPasswords bool + clientFoundRows bool + rowFormat string +} + +func (db *mysql) Init(uri *URI) error { + db.quoter = mysqlQuoter + return db.Base.Init(db, uri) +} + +func (db *mysql) SetParams(params map[string]string) { + rowFormat, ok := params["rowFormat"] + if ok { + var t = strings.ToUpper(rowFormat) + switch t { + case "COMPACT": + fallthrough + case "REDUNDANT": + fallthrough + case "DYNAMIC": + fallthrough + case "COMPRESSED": + db.rowFormat = t + break + default: + break + } + } +} + +func (db *mysql) SQLType(c *schemas.Column) string { + var res string + switch t := c.SQLType.Name; t { + case schemas.Bool: + res = schemas.TinyInt + c.Length = 1 + case schemas.Serial: + c.IsAutoIncrement = true + c.IsPrimaryKey = true + c.Nullable = false + res = schemas.Int + case schemas.BigSerial: + c.IsAutoIncrement = true + c.IsPrimaryKey = true + c.Nullable = false + res = schemas.BigInt + case schemas.Bytea: + res = schemas.Blob + case schemas.TimeStampz: + res = schemas.Char + c.Length = 64 + case schemas.Enum: // mysql enum + res = schemas.Enum + res += "(" + opts := "" + for v := range c.EnumOptions { + opts += fmt.Sprintf(",'%v'", v) + } + res += strings.TrimLeft(opts, ",") + res += ")" + case schemas.Set: // mysql set + res = schemas.Set + res += "(" + opts := "" + for v := range c.SetOptions { + opts += fmt.Sprintf(",'%v'", v) + } + res += strings.TrimLeft(opts, ",") + res += ")" + case schemas.NVarchar: + res = schemas.Varchar + case schemas.Uuid: + res = schemas.Varchar + c.Length = 40 + case schemas.Json: + res = schemas.Text + default: + res = t + } + + hasLen1 := (c.Length > 0) + hasLen2 := (c.Length2 > 0) + + if res == schemas.BigInt && !hasLen1 && !hasLen2 { + c.Length = 20 + hasLen1 = true + } + + if hasLen2 { + res += "(" + strconv.Itoa(c.Length) + "," + strconv.Itoa(c.Length2) + ")" + } else if hasLen1 { + res += "(" + strconv.Itoa(c.Length) + ")" + } + return res +} + +func (db *mysql) IsReserved(name string) bool { + _, ok := mysqlReservedWords[strings.ToUpper(name)] + return ok +} + +func (db *mysql) AutoIncrStr() string { + return "AUTO_INCREMENT" +} + +func (db *mysql) IndexCheckSQL(tableName, idxName string) (string, []interface{}) { + args := []interface{}{db.uri.DBName, 
tableName, idxName} + sql := "SELECT `INDEX_NAME` FROM `INFORMATION_SCHEMA`.`STATISTICS`" + sql += " WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ? AND `INDEX_NAME`=?" + return sql, args +} + +func (db *mysql) IsTableExist(queryer core.Queryer, ctx context.Context, tableName string) (bool, error) { + sql := "SELECT `TABLE_NAME` from `INFORMATION_SCHEMA`.`TABLES` WHERE `TABLE_SCHEMA`=? and `TABLE_NAME`=?" + return db.HasRecords(queryer, ctx, sql, db.uri.DBName, tableName) +} + +func (db *mysql) AddColumnSQL(tableName string, col *schemas.Column) string { + quoter := db.dialect.Quoter() + s, _ := ColumnString(db, col, true) + sql := fmt.Sprintf("ALTER TABLE %v ADD %v", quoter.Quote(tableName), s) + if len(col.Comment) > 0 { + sql += " COMMENT '" + col.Comment + "'" + } + return sql +} + +func (db *mysql) GetColumns(queryer core.Queryer, ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) { + args := []interface{}{db.uri.DBName, tableName} + alreadyQuoted := "(INSTR(VERSION(), 'maria') > 0 && " + + "(SUBSTRING_INDEX(VERSION(), '.', 1) > 10 || " + + "(SUBSTRING_INDEX(VERSION(), '.', 1) = 10 && " + + "(SUBSTRING_INDEX(SUBSTRING(VERSION(), 4), '.', 1) > 2 || " + + "(SUBSTRING_INDEX(SUBSTRING(VERSION(), 4), '.', 1) = 2 && " + + "SUBSTRING_INDEX(SUBSTRING(VERSION(), 6), '-', 1) >= 7)))))" + s := "SELECT `COLUMN_NAME`, `IS_NULLABLE`, `COLUMN_DEFAULT`, `COLUMN_TYPE`," + + " `COLUMN_KEY`, `EXTRA`, `COLUMN_COMMENT`, " + + alreadyQuoted + " AS NEEDS_QUOTE " + + "FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ?" + + " ORDER BY `COLUMNS`.ORDINAL_POSITION" + + rows, err := queryer.QueryContext(ctx, s, args...) + if err != nil { + return nil, nil, err + } + defer rows.Close() + + cols := make(map[string]*schemas.Column) + colSeq := make([]string, 0) + for rows.Next() { + col := new(schemas.Column) + col.Indexes = make(map[string]int) + + var columnName, isNullable, colType, colKey, extra, comment string + var alreadyQuoted bool + var colDefault *string + err = rows.Scan(&columnName, &isNullable, &colDefault, &colType, &colKey, &extra, &comment, &alreadyQuoted) + if err != nil { + return nil, nil, err + } + col.Name = strings.Trim(columnName, "` ") + col.Comment = comment + if "YES" == isNullable { + col.Nullable = true + } + + if colDefault != nil && (!alreadyQuoted || *colDefault != "NULL") { + col.Default = *colDefault + col.DefaultIsEmpty = false + } else { + col.DefaultIsEmpty = true + } + + cts := strings.Split(colType, "(") + colName := cts[0] + colType = strings.ToUpper(colName) + var len1, len2 int + if len(cts) == 2 { + idx := strings.Index(cts[1], ")") + if colType == schemas.Enum && cts[1][0] == '\'' { // enum + options := strings.Split(cts[1][0:idx], ",") + col.EnumOptions = make(map[string]int) + for k, v := range options { + v = strings.TrimSpace(v) + v = strings.Trim(v, "'") + col.EnumOptions[v] = k + } + } else if colType == schemas.Set && cts[1][0] == '\'' { + options := strings.Split(cts[1][0:idx], ",") + col.SetOptions = make(map[string]int) + for k, v := range options { + v = strings.TrimSpace(v) + v = strings.Trim(v, "'") + col.SetOptions[v] = k + } + } else { + lens := strings.Split(cts[1][0:idx], ",") + len1, err = strconv.Atoi(strings.TrimSpace(lens[0])) + if err != nil { + return nil, nil, err + } + if len(lens) == 2 { + len2, err = strconv.Atoi(lens[1]) + if err != nil { + return nil, nil, err + } + } + } + } + if colType == "FLOAT UNSIGNED" { + colType = "FLOAT" + } + if colType == "DOUBLE UNSIGNED" { + colType = 
"DOUBLE" + } + col.Length = len1 + col.Length2 = len2 + if _, ok := schemas.SqlTypes[colType]; ok { + col.SQLType = schemas.SQLType{Name: colType, DefaultLength: len1, DefaultLength2: len2} + } else { + return nil, nil, fmt.Errorf("Unknown colType %v", colType) + } + + if colKey == "PRI" { + col.IsPrimaryKey = true + } + if colKey == "UNI" { + // col.is + } + + if extra == "auto_increment" { + col.IsAutoIncrement = true + } + + if !col.DefaultIsEmpty { + if !alreadyQuoted && col.SQLType.IsText() { + col.Default = "'" + col.Default + "'" + } else if col.SQLType.IsTime() && !alreadyQuoted && col.Default != "CURRENT_TIMESTAMP" { + col.Default = "'" + col.Default + "'" + } + } + cols[col.Name] = col + colSeq = append(colSeq, col.Name) + } + return colSeq, cols, nil +} + +func (db *mysql) GetTables(queryer core.Queryer, ctx context.Context) ([]*schemas.Table, error) { + args := []interface{}{db.uri.DBName} + s := "SELECT `TABLE_NAME`, `ENGINE`, `AUTO_INCREMENT`, `TABLE_COMMENT` from " + + "`INFORMATION_SCHEMA`.`TABLES` WHERE `TABLE_SCHEMA`=? AND (`ENGINE`='MyISAM' OR `ENGINE` = 'InnoDB' OR `ENGINE` = 'TokuDB')" + + rows, err := queryer.QueryContext(ctx, s, args...) + if err != nil { + return nil, err + } + defer rows.Close() + + tables := make([]*schemas.Table, 0) + for rows.Next() { + table := schemas.NewEmptyTable() + var name, engine string + var autoIncr, comment *string + err = rows.Scan(&name, &engine, &autoIncr, &comment) + if err != nil { + return nil, err + } + + table.Name = name + if comment != nil { + table.Comment = *comment + } + table.StoreEngine = engine + tables = append(tables, table) + } + return tables, nil +} + +func (db *mysql) SetQuotePolicy(quotePolicy QuotePolicy) { + switch quotePolicy { + case QuotePolicyNone: + var q = mysqlQuoter + q.IsReserved = schemas.AlwaysNoReserve + db.quoter = q + case QuotePolicyReserved: + var q = mysqlQuoter + q.IsReserved = db.IsReserved + db.quoter = q + case QuotePolicyAlways: + fallthrough + default: + db.quoter = mysqlQuoter + } +} + +func (db *mysql) GetIndexes(queryer core.Queryer, ctx context.Context, tableName string) (map[string]*schemas.Index, error) { + args := []interface{}{db.uri.DBName, tableName} + s := "SELECT `INDEX_NAME`, `NON_UNIQUE`, `COLUMN_NAME` FROM `INFORMATION_SCHEMA`.`STATISTICS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ?" + + rows, err := queryer.QueryContext(ctx, s, args...) 
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	indexes := make(map[string]*schemas.Index, 0)
+	for rows.Next() {
+		var indexType int
+		var indexName, colName, nonUnique string
+		err = rows.Scan(&indexName, &nonUnique, &colName)
+		if err != nil {
+			return nil, err
+		}
+
+		if indexName == "PRIMARY" {
+			continue
+		}
+
+		if "YES" == nonUnique || nonUnique == "1" {
+			indexType = schemas.IndexType
+		} else {
+			indexType = schemas.UniqueType
+		}
+
+		colName = strings.Trim(colName, "` ")
+		var isRegular bool
+		if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) {
+			indexName = indexName[5+len(tableName):]
+			isRegular = true
+		}
+
+		var index *schemas.Index
+		var ok bool
+		if index, ok = indexes[indexName]; !ok {
+			index = new(schemas.Index)
+			index.IsRegular = isRegular
+			index.Type = indexType
+			index.Name = indexName
+			indexes[indexName] = index
+		}
+		index.AddColumn(colName)
+	}
+	return indexes, nil
+}
+
+func (db *mysql) CreateTableSQL(table *schemas.Table, tableName string) ([]string, bool) {
+	var sql = "CREATE TABLE IF NOT EXISTS "
+	if tableName == "" {
+		tableName = table.Name
+	}
+
+	quoter := db.Quoter()
+
+	sql += quoter.Quote(tableName)
+	sql += " ("
+
+	if len(table.ColumnsSeq()) > 0 {
+		pkList := table.PrimaryKeys
+
+		for _, colName := range table.ColumnsSeq() {
+			col := table.GetColumn(colName)
+			s, _ := ColumnString(db, col, col.IsPrimaryKey && len(pkList) == 1)
+			sql += s
+			sql = strings.TrimSpace(sql)
+			if len(col.Comment) > 0 {
+				sql += " COMMENT '" + col.Comment + "'"
+			}
+			sql += ", "
+		}
+
+		if len(pkList) > 1 {
+			sql += "PRIMARY KEY ( "
+			sql += quoter.Join(pkList, ",")
+			sql += " ), "
+		}
+
+		sql = sql[:len(sql)-2]
+	}
+	sql += ")"
+
+	if table.StoreEngine != "" {
+		sql += " ENGINE=" + table.StoreEngine
+	}
+
+	var charset = table.Charset
+	if len(charset) == 0 {
+		charset = db.URI().Charset
+	}
+	if len(charset) != 0 {
+		sql += " DEFAULT CHARSET " + charset
+	}
+
+	if db.rowFormat != "" {
+		sql += " ROW_FORMAT=" + db.rowFormat
+	}
+	return []string{sql}, true
+}
+
+func (db *mysql) Filters() []Filter {
+	return []Filter{}
+}
+
+type mymysqlDriver struct {
+}
+
+func (p *mymysqlDriver) Parse(driverName, dataSourceName string) (*URI, error) {
+	uri := &URI{DBType: schemas.MYSQL}
+
+	pd := strings.SplitN(dataSourceName, "*", 2)
+	if len(pd) == 2 {
+		// Parse protocol part of URI
+		p := strings.SplitN(pd[0], ":", 2)
+		if len(p) != 2 {
+			return nil, errors.New("Wrong protocol part of URI")
+		}
+		uri.Proto = p[0]
+		options := strings.Split(p[1], ",")
+		uri.Raddr = options[0]
+		for _, o := range options[1:] {
+			kv := strings.SplitN(o, "=", 2)
+			var k, v string
+			if len(kv) == 2 {
+				k, v = kv[0], kv[1]
+			} else {
+				k, v = o, "true"
+			}
+			switch k {
+			case "laddr":
+				uri.Laddr = v
+			case "timeout":
+				to, err := time.ParseDuration(v)
+				if err != nil {
+					return nil, err
+				}
+				uri.Timeout = to
+			default:
+				return nil, errors.New("Unknown option: " + k)
+			}
+		}
+		// Remove protocol part
+		pd = pd[1:]
+	}
+	// Parse database part of URI
+	dup := strings.SplitN(pd[0], "/", 3)
+	if len(dup) != 3 {
+		return nil, errors.New("Wrong database part of URI")
+	}
+	uri.DBName = dup[0]
+	uri.User = dup[1]
+	uri.Passwd = dup[2]
+
+	return uri, nil
+}
+
+type mysqlDriver struct {
+}
+
+func (p *mysqlDriver) Parse(driverName, dataSourceName string) (*URI, error) {
+	dsnPattern := regexp.MustCompile(
+		`^(?:(?P<user>.*?)(?::(?P<passwd>.*))?@)?` + // [user[:password]@]
+			`(?:(?P<net>[^\(]*)(?:\((?P<addr>[^\)]*)\))?)?` + // [net[(addr)]]
+			`\/(?P<dbname>.*?)` + // /dbname
+			`(?:\?(?P<params>[^\?]*))?$`) // [?param1=value1&paramN=valueN]
+	matches := dsnPattern.FindStringSubmatch(dataSourceName)
+	// tlsConfigRegister := make(map[string]*tls.Config)
+	names := dsnPattern.SubexpNames()
+
+	uri := &URI{DBType: schemas.MYSQL}
+
+	for i, match := range matches {
+		switch names[i] {
+		case "dbname":
+			uri.DBName = match
+		case "params":
+			if len(match) > 0 {
+				kvs := strings.Split(match, "&")
+				for _, kv := range kvs {
+					splits := strings.Split(kv, "=")
+					if len(splits) == 2 {
+						switch splits[0] {
+						case "charset":
+							uri.Charset = splits[1]
+						}
+					}
+				}
+			}
+
+		}
+	}
+	return uri, nil
+}
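With the named capture groups restored, mysqlDriver.Parse pulls only the database name and the charset parameter out of a go-sql-driver style DSN; everything else in the DSN is ignored at this stage. A quick sketch via OpenDialect from driver.go (the DSN here is made up):

package main

import (
	"fmt"

	"xorm.io/xorm/dialects"
)

func main() {
	// "mysql" is wired to mysqlDriver and the mysql dialect by regDrvsNDialects.
	d, err := dialects.OpenDialect("mysql", "user:pass@tcp(127.0.0.1:3306)/corona?charset=utf8")
	if err != nil {
		panic(err)
	}
	uri := d.URI()
	fmt.Println(uri.DBName, uri.Charset) // corona utf8
}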
diff --git a/vendor/xorm.io/xorm/dialects/oracle.go b/vendor/xorm.io/xorm/dialects/oracle.go
new file mode 100644
index 0000000..91eed25
--- /dev/null
+++ b/vendor/xorm.io/xorm/dialects/oracle.go
@@ -0,0 +1,854 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dialects
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"xorm.io/xorm/core"
+	"xorm.io/xorm/schemas"
+)
+
+var (
+	oracleReservedWords = map[string]bool{
+		"ACCESS": true,
+		"ACCOUNT": true,
+		"ACTIVATE": true,
+		"ADD": true,
+		"ADMIN": true,
+		"ADVISE": true,
+		"AFTER": true,
+		"ALL": true,
+		"ALL_ROWS": true,
+		"ALLOCATE": true,
+		"ALTER": true,
+		"ANALYZE": true,
+		"AND": true,
+		"ANY": true,
+		"ARCHIVE": true,
+		"ARCHIVELOG": true,
+		"ARRAY": true,
+		"AS": true,
+		"ASC": true,
+		"AT": true,
+		"AUDIT": true,
+		"AUTHENTICATED": true,
+		"AUTHORIZATION": true,
+		"AUTOEXTEND": true,
+		"AUTOMATIC": true,
+		"BACKUP": true,
+		"BECOME": true,
+		"BEFORE": true,
+		"BEGIN": true,
+		"BETWEEN": true,
+		"BFILE": true,
+		"BITMAP": true,
+		"BLOB": true,
+		"BLOCK": true,
+		"BODY": true,
+		"BY": true,
+		"CACHE": true,
+		"CACHE_INSTANCES": true,
+		"CANCEL": true,
+		"CASCADE": true,
+		"CAST": true,
+		"CFILE": true,
+		"CHAINED": true,
+		"CHANGE": true,
+		"CHAR": true,
+		"CHAR_CS": true,
+		"CHARACTER": true,
+		"CHECK": true,
+		"CHECKPOINT": true,
+		"CHOOSE": true,
+		"CHUNK": true,
+		"CLEAR": true,
+		"CLOB": true,
+		"CLONE": true,
+		"CLOSE": true,
+		"CLOSE_CACHED_OPEN_CURSORS": true,
+		"CLUSTER": true,
+		"COALESCE": true,
+		"COLUMN": true,
+		"COLUMNS": true,
+		"COMMENT": true,
+		"COMMIT": true,
+		"COMMITTED": true,
+		"COMPATIBILITY": true,
+		"COMPILE": true,
+		"COMPLETE": true,
+		"COMPOSITE_LIMIT": true,
+		"COMPRESS": true,
+		"COMPUTE": true,
+		"CONNECT": true,
+		"CONNECT_TIME": true,
+		"CONSTRAINT": true,
+		"CONSTRAINTS": true,
+		"CONTENTS": true,
+		"CONTINUE": true,
+		"CONTROLFILE": true,
+		"CONVERT": true,
+		"COST": true,
+		"CPU_PER_CALL": true,
+		"CPU_PER_SESSION": true,
+		"CREATE": true,
+		"CURRENT": true,
+		"CURRENT_SCHEMA": true,
+		"CURREN_USER": true,
+		"CURSOR": true,
+		"CYCLE": true,
+		"DANGLING": true,
+		"DATABASE": true,
+		"DATAFILE": true,
+		"DATAFILES": true,
+		"DATAOBJNO": true,
+		"DATE": true,
+		"DBA": true,
+		"DBHIGH": true,
+		"DBLOW": true,
+		"DBMAC": true,
+		"DEALLOCATE": true,
+		"DEBUG": true,
+		"DEC": true,
+		"DECIMAL": true,
+		"DECLARE": true,
+		"DEFAULT": true,
+		"DEFERRABLE": true,
+		"DEFERRED": true,
+		"DEGREE": true,
+		"DELETE": true,
+		"DEREF": true,
+		"DESC": true,
+		"DIRECTORY": true,
+		"DISABLE": true,
+		"DISCONNECT": true,
+		"DISMOUNT": true,
+		"DISTINCT": true,
+		"DISTRIBUTED": true,
+		"DML": true,
+		"DOUBLE": true,
+		"DROP": true,
+		"DUMP": true,
+		"EACH": true,
+		"ELSE": true,
+		"ENABLE": true,
+		"END": true,
+		"ENFORCE": true,
+		"ENTRY": true,
+
"ESCAPE": true, + "EXCEPT": true, + "EXCEPTIONS": true, + "EXCHANGE": true, + "EXCLUDING": true, + "EXCLUSIVE": true, + "EXECUTE": true, + "EXISTS": true, + "EXPIRE": true, + "EXPLAIN": true, + "EXTENT": true, + "EXTENTS": true, + "EXTERNALLY": true, + "FAILED_LOGIN_ATTEMPTS": true, + "FALSE": true, + "FAST": true, + "FILE": true, + "FIRST_ROWS": true, + "FLAGGER": true, + "FLOAT": true, + "FLOB": true, + "FLUSH": true, + "FOR": true, + "FORCE": true, + "FOREIGN": true, + "FREELIST": true, + "FREELISTS": true, + "FROM": true, + "FULL": true, + "FUNCTION": true, + "GLOBAL": true, + "GLOBALLY": true, + "GLOBAL_NAME": true, + "GRANT": true, + "GROUP": true, + "GROUPS": true, + "HASH": true, + "HASHKEYS": true, + "HAVING": true, + "HEADER": true, + "HEAP": true, + "IDENTIFIED": true, + "IDGENERATORS": true, + "IDLE_TIME": true, + "IF": true, + "IMMEDIATE": true, + "IN": true, + "INCLUDING": true, + "INCREMENT": true, + "INDEX": true, + "INDEXED": true, + "INDEXES": true, + "INDICATOR": true, + "IND_PARTITION": true, + "INITIAL": true, + "INITIALLY": true, + "INITRANS": true, + "INSERT": true, + "INSTANCE": true, + "INSTANCES": true, + "INSTEAD": true, + "INT": true, + "INTEGER": true, + "INTERMEDIATE": true, + "INTERSECT": true, + "INTO": true, + "IS": true, + "ISOLATION": true, + "ISOLATION_LEVEL": true, + "KEEP": true, + "KEY": true, + "KILL": true, + "LABEL": true, + "LAYER": true, + "LESS": true, + "LEVEL": true, + "LIBRARY": true, + "LIKE": true, + "LIMIT": true, + "LINK": true, + "LIST": true, + "LOB": true, + "LOCAL": true, + "LOCK": true, + "LOCKED": true, + "LOG": true, + "LOGFILE": true, + "LOGGING": true, + "LOGICAL_READS_PER_CALL": true, + "LOGICAL_READS_PER_SESSION": true, + "LONG": true, + "MANAGE": true, + "MASTER": true, + "MAX": true, + "MAXARCHLOGS": true, + "MAXDATAFILES": true, + "MAXEXTENTS": true, + "MAXINSTANCES": true, + "MAXLOGFILES": true, + "MAXLOGHISTORY": true, + "MAXLOGMEMBERS": true, + "MAXSIZE": true, + "MAXTRANS": true, + "MAXVALUE": true, + "MIN": true, + "MEMBER": true, + "MINIMUM": true, + "MINEXTENTS": true, + "MINUS": true, + "MINVALUE": true, + "MLSLABEL": true, + "MLS_LABEL_FORMAT": true, + "MODE": true, + "MODIFY": true, + "MOUNT": true, + "MOVE": true, + "MTS_DISPATCHERS": true, + "MULTISET": true, + "NATIONAL": true, + "NCHAR": true, + "NCHAR_CS": true, + "NCLOB": true, + "NEEDED": true, + "NESTED": true, + "NETWORK": true, + "NEW": true, + "NEXT": true, + "NOARCHIVELOG": true, + "NOAUDIT": true, + "NOCACHE": true, + "NOCOMPRESS": true, + "NOCYCLE": true, + "NOFORCE": true, + "NOLOGGING": true, + "NOMAXVALUE": true, + "NOMINVALUE": true, + "NONE": true, + "NOORDER": true, + "NOOVERRIDE": true, + "NOPARALLEL": true, + "NOREVERSE": true, + "NORMAL": true, + "NOSORT": true, + "NOT": true, + "NOTHING": true, + "NOWAIT": true, + "NULL": true, + "NUMBER": true, + "NUMERIC": true, + "NVARCHAR2": true, + "OBJECT": true, + "OBJNO": true, + "OBJNO_REUSE": true, + "OF": true, + "OFF": true, + "OFFLINE": true, + "OID": true, + "OIDINDEX": true, + "OLD": true, + "ON": true, + "ONLINE": true, + "ONLY": true, + "OPCODE": true, + "OPEN": true, + "OPTIMAL": true, + "OPTIMIZER_GOAL": true, + "OPTION": true, + "OR": true, + "ORDER": true, + "ORGANIZATION": true, + "OSLABEL": true, + "OVERFLOW": true, + "OWN": true, + "PACKAGE": true, + "PARALLEL": true, + "PARTITION": true, + "PASSWORD": true, + "PASSWORD_GRACE_TIME": true, + "PASSWORD_LIFE_TIME": true, + "PASSWORD_LOCK_TIME": true, + "PASSWORD_REUSE_MAX": true, + "PASSWORD_REUSE_TIME": true, + 
"PASSWORD_VERIFY_FUNCTION": true, + "PCTFREE": true, + "PCTINCREASE": true, + "PCTTHRESHOLD": true, + "PCTUSED": true, + "PCTVERSION": true, + "PERCENT": true, + "PERMANENT": true, + "PLAN": true, + "PLSQL_DEBUG": true, + "POST_TRANSACTION": true, + "PRECISION": true, + "PRESERVE": true, + "PRIMARY": true, + "PRIOR": true, + "PRIVATE": true, + "PRIVATE_SGA": true, + "PRIVILEGE": true, + "PRIVILEGES": true, + "PROCEDURE": true, + "PROFILE": true, + "PUBLIC": true, + "PURGE": true, + "QUEUE": true, + "QUOTA": true, + "RANGE": true, + "RAW": true, + "RBA": true, + "READ": true, + "READUP": true, + "REAL": true, + "REBUILD": true, + "RECOVER": true, + "RECOVERABLE": true, + "RECOVERY": true, + "REF": true, + "REFERENCES": true, + "REFERENCING": true, + "REFRESH": true, + "RENAME": true, + "REPLACE": true, + "RESET": true, + "RESETLOGS": true, + "RESIZE": true, + "RESOURCE": true, + "RESTRICTED": true, + "RETURN": true, + "RETURNING": true, + "REUSE": true, + "REVERSE": true, + "REVOKE": true, + "ROLE": true, + "ROLES": true, + "ROLLBACK": true, + "ROW": true, + "ROWID": true, + "ROWNUM": true, + "ROWS": true, + "RULE": true, + "SAMPLE": true, + "SAVEPOINT": true, + "SB4": true, + "SCAN_INSTANCES": true, + "SCHEMA": true, + "SCN": true, + "SCOPE": true, + "SD_ALL": true, + "SD_INHIBIT": true, + "SD_SHOW": true, + "SEGMENT": true, + "SEG_BLOCK": true, + "SEG_FILE": true, + "SELECT": true, + "SEQUENCE": true, + "SERIALIZABLE": true, + "SESSION": true, + "SESSION_CACHED_CURSORS": true, + "SESSIONS_PER_USER": true, + "SET": true, + "SHARE": true, + "SHARED": true, + "SHARED_POOL": true, + "SHRINK": true, + "SIZE": true, + "SKIP": true, + "SKIP_UNUSABLE_INDEXES": true, + "SMALLINT": true, + "SNAPSHOT": true, + "SOME": true, + "SORT": true, + "SPECIFICATION": true, + "SPLIT": true, + "SQL_TRACE": true, + "STANDBY": true, + "START": true, + "STATEMENT_ID": true, + "STATISTICS": true, + "STOP": true, + "STORAGE": true, + "STORE": true, + "STRUCTURE": true, + "SUCCESSFUL": true, + "SWITCH": true, + "SYS_OP_ENFORCE_NOT_NULL$": true, + "SYS_OP_NTCIMG$": true, + "SYNONYM": true, + "SYSDATE": true, + "SYSDBA": true, + "SYSOPER": true, + "SYSTEM": true, + "TABLE": true, + "TABLES": true, + "TABLESPACE": true, + "TABLESPACE_NO": true, + "TABNO": true, + "TEMPORARY": true, + "THAN": true, + "THE": true, + "THEN": true, + "THREAD": true, + "TIMESTAMP": true, + "TIME": true, + "TO": true, + "TOPLEVEL": true, + "TRACE": true, + "TRACING": true, + "TRANSACTION": true, + "TRANSITIONAL": true, + "TRIGGER": true, + "TRIGGERS": true, + "TRUE": true, + "TRUNCATE": true, + "TX": true, + "TYPE": true, + "UB2": true, + "UBA": true, + "UID": true, + "UNARCHIVED": true, + "UNDO": true, + "UNION": true, + "UNIQUE": true, + "UNLIMITED": true, + "UNLOCK": true, + "UNRECOVERABLE": true, + "UNTIL": true, + "UNUSABLE": true, + "UNUSED": true, + "UPDATABLE": true, + "UPDATE": true, + "USAGE": true, + "USE": true, + "USER": true, + "USING": true, + "VALIDATE": true, + "VALIDATION": true, + "VALUE": true, + "VALUES": true, + "VARCHAR": true, + "VARCHAR2": true, + "VARYING": true, + "VIEW": true, + "WHEN": true, + "WHENEVER": true, + "WHERE": true, + "WITH": true, + "WITHOUT": true, + "WORK": true, + "WRITE": true, + "WRITEDOWN": true, + "WRITEUP": true, + "XID": true, + "YEAR": true, + "ZONE": true, + } + + oracleQuoter = schemas.Quoter{ + Prefix: '"', + Suffix: '"', + IsReserved: schemas.AlwaysReserve, + } +) + +type oracle struct { + Base +} + +func (db *oracle) Init(uri *URI) error { + db.quoter = oracleQuoter + return 
db.Base.Init(db, uri) +} + +func (db *oracle) SQLType(c *schemas.Column) string { + var res string + switch t := c.SQLType.Name; t { + case schemas.Bit, schemas.TinyInt, schemas.SmallInt, schemas.MediumInt, schemas.Int, schemas.Integer, schemas.BigInt, schemas.Bool, schemas.Serial, schemas.BigSerial: + res = "NUMBER" + case schemas.Binary, schemas.VarBinary, schemas.Blob, schemas.TinyBlob, schemas.MediumBlob, schemas.LongBlob, schemas.Bytea: + return schemas.Blob + case schemas.Time, schemas.DateTime, schemas.TimeStamp: + res = schemas.TimeStamp + case schemas.TimeStampz: + res = "TIMESTAMP WITH TIME ZONE" + case schemas.Float, schemas.Double, schemas.Numeric, schemas.Decimal: + res = "NUMBER" + case schemas.Text, schemas.MediumText, schemas.LongText, schemas.Json: + res = "CLOB" + case schemas.Char, schemas.Varchar, schemas.TinyText: + res = "VARCHAR2" + default: + res = t + } + + hasLen1 := (c.Length > 0) + hasLen2 := (c.Length2 > 0) + + if hasLen2 { + res += "(" + strconv.Itoa(c.Length) + "," + strconv.Itoa(c.Length2) + ")" + } else if hasLen1 { + res += "(" + strconv.Itoa(c.Length) + ")" + } + return res +} + +func (db *oracle) AutoIncrStr() string { + return "AUTO_INCREMENT" +} + +func (db *oracle) IsReserved(name string) bool { + _, ok := oracleReservedWords[strings.ToUpper(name)] + return ok +} + +func (db *oracle) DropTableSQL(tableName string) (string, bool) { + return fmt.Sprintf("DROP TABLE `%s`", tableName), false +} + +func (db *oracle) CreateTableSQL(table *schemas.Table, tableName string) ([]string, bool) { + var sql = "CREATE TABLE " + if tableName == "" { + tableName = table.Name + } + + quoter := db.Quoter() + sql += quoter.Quote(tableName) + " (" + + pkList := table.PrimaryKeys + + for _, colName := range table.ColumnsSeq() { + col := table.GetColumn(colName) + /*if col.IsPrimaryKey && len(pkList) == 1 { + sql += col.String(b.dialect) + } else {*/ + s, _ := ColumnString(db, col, false) + sql += s + // } + sql = strings.TrimSpace(sql) + sql += ", " + } + + if len(pkList) > 0 { + sql += "PRIMARY KEY ( " + sql += quoter.Join(pkList, ",") + sql += " ), " + } + + sql = sql[:len(sql)-2] + ")" + return []string{sql}, false +} + +func (db *oracle) SetQuotePolicy(quotePolicy QuotePolicy) { + switch quotePolicy { + case QuotePolicyNone: + var q = oracleQuoter + q.IsReserved = schemas.AlwaysNoReserve + db.quoter = q + case QuotePolicyReserved: + var q = oracleQuoter + q.IsReserved = db.IsReserved + db.quoter = q + case QuotePolicyAlways: + fallthrough + default: + db.quoter = oracleQuoter + } +} + +func (db *oracle) IndexCheckSQL(tableName, idxName string) (string, []interface{}) { + args := []interface{}{tableName, idxName} + return `SELECT INDEX_NAME FROM USER_INDEXES ` + + `WHERE TABLE_NAME = :1 AND INDEX_NAME = :2`, args +} + +func (db *oracle) IsTableExist(queryer core.Queryer, ctx context.Context, tableName string) (bool, error) { + return db.HasRecords(queryer, ctx, `SELECT table_name FROM user_tables WHERE table_name = :1`, tableName) +} + +func (db *oracle) IsColumnExist(queryer core.Queryer, ctx context.Context, tableName, colName string) (bool, error) { + args := []interface{}{tableName, colName} + query := "SELECT column_name FROM USER_TAB_COLUMNS WHERE table_name = :1" + + " AND column_name = :2" + return db.HasRecords(queryer, ctx, query, args...) 
+} + +func (db *oracle) GetColumns(queryer core.Queryer, ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) { + args := []interface{}{tableName} + s := "SELECT column_name,data_default,data_type,data_length,data_precision,data_scale," + + "nullable FROM USER_TAB_COLUMNS WHERE table_name = :1" + + rows, err := queryer.QueryContext(ctx, s, args...) + if err != nil { + return nil, nil, err + } + defer rows.Close() + + cols := make(map[string]*schemas.Column) + colSeq := make([]string, 0) + for rows.Next() { + col := new(schemas.Column) + col.Indexes = make(map[string]int) + + var colName, colDefault, nullable, dataType, dataPrecision, dataScale *string + var dataLen int + + err = rows.Scan(&colName, &colDefault, &dataType, &dataLen, &dataPrecision, + &dataScale, &nullable) + if err != nil { + return nil, nil, err + } + + col.Name = strings.Trim(*colName, `" `) + if colDefault != nil { + col.Default = *colDefault + col.DefaultIsEmpty = false + } + + if *nullable == "Y" { + col.Nullable = true + } else { + col.Nullable = false + } + + var ignore bool + + var dt string + var len1, len2 int + dts := strings.Split(*dataType, "(") + dt = dts[0] + if len(dts) > 1 { + lens := strings.Split(dts[1][:len(dts[1])-1], ",") + if len(lens) > 1 { + len1, _ = strconv.Atoi(lens[0]) + len2, _ = strconv.Atoi(lens[1]) + } else { + len1, _ = strconv.Atoi(lens[0]) + } + } + + switch dt { + case "VARCHAR2": + col.SQLType = schemas.SQLType{Name: schemas.Varchar, DefaultLength: len1, DefaultLength2: len2} + case "NVARCHAR2": + col.SQLType = schemas.SQLType{Name: schemas.NVarchar, DefaultLength: len1, DefaultLength2: len2} + case "TIMESTAMP WITH TIME ZONE": + col.SQLType = schemas.SQLType{Name: schemas.TimeStampz, DefaultLength: 0, DefaultLength2: 0} + case "NUMBER": + col.SQLType = schemas.SQLType{Name: schemas.Double, DefaultLength: len1, DefaultLength2: len2} + case "LONG", "LONG RAW": + col.SQLType = schemas.SQLType{Name: schemas.Text, DefaultLength: 0, DefaultLength2: 0} + case "RAW": + col.SQLType = schemas.SQLType{Name: schemas.Binary, DefaultLength: 0, DefaultLength2: 0} + case "ROWID": + col.SQLType = schemas.SQLType{Name: schemas.Varchar, DefaultLength: 18, DefaultLength2: 0} + case "AQ$_SUBSCRIBERS": + ignore = true + default: + col.SQLType = schemas.SQLType{Name: strings.ToUpper(dt), DefaultLength: len1, DefaultLength2: len2} + } + + if ignore { + continue + } + + if _, ok := schemas.SqlTypes[col.SQLType.Name]; !ok { + return nil, nil, fmt.Errorf("Unknown colType %v %v", *dataType, col.SQLType) + } + + col.Length = dataLen + + if col.SQLType.IsText() || col.SQLType.IsTime() { + if !col.DefaultIsEmpty { + col.Default = "'" + col.Default + "'" + } + } + cols[col.Name] = col + colSeq = append(colSeq, col.Name) + } + + return colSeq, cols, nil +} + +func (db *oracle) GetTables(queryer core.Queryer, ctx context.Context) ([]*schemas.Table, error) { + args := []interface{}{} + s := "SELECT table_name FROM user_tables" + + rows, err := queryer.QueryContext(ctx, s, args...) 
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	tables := make([]*schemas.Table, 0)
+	for rows.Next() {
+		table := schemas.NewEmptyTable()
+		err = rows.Scan(&table.Name)
+		if err != nil {
+			return nil, err
+		}
+
+		tables = append(tables, table)
+	}
+	return tables, nil
+}
+
+func (db *oracle) GetIndexes(queryer core.Queryer, ctx context.Context, tableName string) (map[string]*schemas.Index, error) {
+	args := []interface{}{tableName}
+	s := "SELECT t.column_name,i.uniqueness,i.index_name FROM user_ind_columns t,user_indexes i " +
+		"WHERE t.index_name = i.index_name and t.table_name = i.table_name and t.table_name =:1"
+
+	rows, err := queryer.QueryContext(ctx, s, args...)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	indexes := make(map[string]*schemas.Index, 0)
+	for rows.Next() {
+		var indexType int
+		var indexName, colName, uniqueness string
+
+		err = rows.Scan(&colName, &uniqueness, &indexName)
+		if err != nil {
+			return nil, err
+		}
+
+		indexName = strings.Trim(indexName, `" `)
+
+		var isRegular bool
+		if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) {
+			indexName = indexName[5+len(tableName):]
+			isRegular = true
+		}
+
+		if uniqueness == "UNIQUE" {
+			indexType = schemas.UniqueType
+		} else {
+			indexType = schemas.IndexType
+		}
+
+		var index *schemas.Index
+		var ok bool
+		if index, ok = indexes[indexName]; !ok {
+			index = new(schemas.Index)
+			index.Type = indexType
+			index.Name = indexName
+			index.IsRegular = isRegular
+			indexes[indexName] = index
+		}
+		index.AddColumn(colName)
+	}
+	return indexes, nil
+}
+
+func (db *oracle) Filters() []Filter {
+	return []Filter{
+		&SeqFilter{Prefix: ":", Start: 1},
+	}
+}
+
+type goracleDriver struct {
+}
+
+func (cfg *goracleDriver) Parse(driverName, dataSourceName string) (*URI, error) {
+	db := &URI{DBType: schemas.ORACLE}
+	dsnPattern := regexp.MustCompile(
+		`^(?:(?P<user>.*?)(?::(?P<passwd>.*))?@)?` + // [user[:password]@]
+			`(?:(?P<net>[^\(]*)(?:\((?P<addr>[^\)]*)\))?)?` + // [net[(addr)]]
+			`\/(?P<dbname>.*?)` + // /dbname
+			`(?:\?(?P<params>[^\?]*))?$`) // [?param1=value1&paramN=valueN]
+	matches := dsnPattern.FindStringSubmatch(dataSourceName)
+	// tlsConfigRegister := make(map[string]*tls.Config)
+	names := dsnPattern.SubexpNames()
+
+	for i, match := range matches {
+		switch names[i] {
+		case "dbname":
+			db.DBName = match
+		}
+	}
+	if db.DBName == "" {
+		return nil, errors.New("dbname is empty")
+	}
+	return db, nil
+}
+
+type oci8Driver struct {
+}
+
+// dataSourceName=user/password@ipv4:port/dbname
+// dataSourceName=user/password@[ipv6]:port/dbname
+func (p *oci8Driver) Parse(driverName, dataSourceName string) (*URI, error) {
+	db := &URI{DBType: schemas.ORACLE}
+	dsnPattern := regexp.MustCompile(
+		`^(?P<user>.*)\/(?P<password>.*)@` + // user:password@
+			`(?P<netloc>.*)` + // ip:port
+			`\/(?P<dbname>.*)`) // dbname
+	matches := dsnPattern.FindStringSubmatch(dataSourceName)
+	names := dsnPattern.SubexpNames()
+	for i, match := range matches {
+		switch names[i] {
+		case "dbname":
+			db.DBName = match
+		}
+	}
+	if db.DBName == "" && len(matches) != 0 {
+		return nil, errors.New("dbname is empty")
+	}
+	return db, nil
+}
diff --git a/vendor/xorm.io/xorm/dialects/pg_reserved.txt b/vendor/xorm.io/xorm/dialects/pg_reserved.txt
new file mode 100644
index 0000000..720ed37
--- /dev/null
+++ b/vendor/xorm.io/xorm/dialects/pg_reserved.txt
@@ -0,0 +1,746 @@
+A non-reserved non-reserved
+ABORT non-reserved
+ABS reserved reserved
+ABSENT non-reserved non-reserved
+ABSOLUTE non-reserved non-reserved non-reserved reserved
+ACCESS
non-reserved +ACCORDING non-reserved non-reserved +ACTION non-reserved non-reserved non-reserved reserved +ADA non-reserved non-reserved non-reserved +ADD non-reserved non-reserved non-reserved reserved +ADMIN non-reserved non-reserved non-reserved +AFTER non-reserved non-reserved non-reserved +AGGREGATE non-reserved +ALL reserved reserved reserved reserved +ALLOCATE reserved reserved reserved +ALSO non-reserved +ALTER non-reserved reserved reserved reserved +ALWAYS non-reserved non-reserved non-reserved +ANALYSE reserved +ANALYZE reserved +AND reserved reserved reserved reserved +ANY reserved reserved reserved reserved +ARE reserved reserved reserved +ARRAY reserved reserved reserved +ARRAY_AGG reserved reserved +ARRAY_MAX_CARDINALITY reserved +AS reserved reserved reserved reserved +ASC reserved non-reserved non-reserved reserved +ASENSITIVE reserved reserved +ASSERTION non-reserved non-reserved non-reserved reserved +ASSIGNMENT non-reserved non-reserved non-reserved +ASYMMETRIC reserved reserved reserved +AT non-reserved reserved reserved reserved +ATOMIC reserved reserved +ATTRIBUTE non-reserved non-reserved non-reserved +ATTRIBUTES non-reserved non-reserved +AUTHORIZATION reserved (can be function or type) reserved reserved reserved +AVG reserved reserved reserved +BACKWARD non-reserved +BASE64 non-reserved non-reserved +BEFORE non-reserved non-reserved non-reserved +BEGIN non-reserved reserved reserved reserved +BEGIN_FRAME reserved +BEGIN_PARTITION reserved +BERNOULLI non-reserved non-reserved +BETWEEN non-reserved (cannot be function or type) reserved reserved reserved +BIGINT non-reserved (cannot be function or type) reserved reserved +BINARY reserved (can be function or type) reserved reserved +BIT non-reserved (cannot be function or type) reserved +BIT_LENGTH reserved +BLOB reserved reserved +BLOCKED non-reserved non-reserved +BOM non-reserved non-reserved +BOOLEAN non-reserved (cannot be function or type) reserved reserved +BOTH reserved reserved reserved reserved +BREADTH non-reserved non-reserved +BY non-reserved reserved reserved reserved +C non-reserved non-reserved non-reserved +CACHE non-reserved +CALL reserved reserved +CALLED non-reserved reserved reserved +CARDINALITY reserved reserved +CASCADE non-reserved non-reserved non-reserved reserved +CASCADED non-reserved reserved reserved reserved +CASE reserved reserved reserved reserved +CAST reserved reserved reserved reserved +CATALOG non-reserved non-reserved non-reserved reserved +CATALOG_NAME non-reserved non-reserved non-reserved +CEIL reserved reserved +CEILING reserved reserved +CHAIN non-reserved non-reserved non-reserved +CHAR non-reserved (cannot be function or type) reserved reserved reserved +CHARACTER non-reserved (cannot be function or type) reserved reserved reserved +CHARACTERISTICS non-reserved non-reserved non-reserved +CHARACTERS non-reserved non-reserved +CHARACTER_LENGTH reserved reserved reserved +CHARACTER_SET_CATALOG non-reserved non-reserved non-reserved +CHARACTER_SET_NAME non-reserved non-reserved non-reserved +CHARACTER_SET_SCHEMA non-reserved non-reserved non-reserved +CHAR_LENGTH reserved reserved reserved +CHECK reserved reserved reserved reserved +CHECKPOINT non-reserved +CLASS non-reserved +CLASS_ORIGIN non-reserved non-reserved non-reserved +CLOB reserved reserved +CLOSE non-reserved reserved reserved reserved +CLUSTER non-reserved +COALESCE non-reserved (cannot be function or type) reserved reserved reserved +COBOL non-reserved non-reserved non-reserved +COLLATE reserved reserved reserved 
reserved +COLLATION reserved (can be function or type) non-reserved non-reserved reserved +COLLATION_CATALOG non-reserved non-reserved non-reserved +COLLATION_NAME non-reserved non-reserved non-reserved +COLLATION_SCHEMA non-reserved non-reserved non-reserved +COLLECT reserved reserved +COLUMN reserved reserved reserved reserved +COLUMNS non-reserved non-reserved +COLUMN_NAME non-reserved non-reserved non-reserved +COMMAND_FUNCTION non-reserved non-reserved non-reserved +COMMAND_FUNCTION_CODE non-reserved non-reserved +COMMENT non-reserved +COMMENTS non-reserved +COMMIT non-reserved reserved reserved reserved +COMMITTED non-reserved non-reserved non-reserved non-reserved +CONCURRENTLY reserved (can be function or type) +CONDITION reserved reserved +CONDITION_NUMBER non-reserved non-reserved non-reserved +CONFIGURATION non-reserved +CONNECT reserved reserved reserved +CONNECTION non-reserved non-reserved non-reserved reserved +CONNECTION_NAME non-reserved non-reserved non-reserved +CONSTRAINT reserved reserved reserved reserved +CONSTRAINTS non-reserved non-reserved non-reserved reserved +CONSTRAINT_CATALOG non-reserved non-reserved non-reserved +CONSTRAINT_NAME non-reserved non-reserved non-reserved +CONSTRAINT_SCHEMA non-reserved non-reserved non-reserved +CONSTRUCTOR non-reserved non-reserved +CONTAINS reserved non-reserved +CONTENT non-reserved non-reserved non-reserved +CONTINUE non-reserved non-reserved non-reserved reserved +CONTROL non-reserved non-reserved +CONVERSION non-reserved +CONVERT reserved reserved reserved +COPY non-reserved +CORR reserved reserved +CORRESPONDING reserved reserved reserved +COST non-reserved +COUNT reserved reserved reserved +COVAR_POP reserved reserved +COVAR_SAMP reserved reserved +CREATE reserved reserved reserved reserved +CROSS reserved (can be function or type) reserved reserved reserved +CSV non-reserved +CUBE reserved reserved +CUME_DIST reserved reserved +CURRENT non-reserved reserved reserved reserved +CURRENT_CATALOG reserved reserved reserved +CURRENT_DATE reserved reserved reserved reserved +CURRENT_DEFAULT_TRANSFORM_GROUP reserved reserved +CURRENT_PATH reserved reserved +CURRENT_ROLE reserved reserved reserved +CURRENT_ROW reserved +CURRENT_SCHEMA reserved (can be function or type) reserved reserved +CURRENT_TIME reserved reserved reserved reserved +CURRENT_TIMESTAMP reserved reserved reserved reserved +CURRENT_TRANSFORM_GROUP_FOR_TYPE reserved reserved +CURRENT_USER reserved reserved reserved reserved +CURSOR non-reserved reserved reserved reserved +CURSOR_NAME non-reserved non-reserved non-reserved +CYCLE non-reserved reserved reserved +DATA non-reserved non-reserved non-reserved non-reserved +DATABASE non-reserved +DATALINK reserved reserved +DATE reserved reserved reserved +DATETIME_INTERVAL_CODE non-reserved non-reserved non-reserved +DATETIME_INTERVAL_PRECISION non-reserved non-reserved non-reserved +DAY non-reserved reserved reserved reserved +DB non-reserved non-reserved +DEALLOCATE non-reserved reserved reserved reserved +DEC non-reserved (cannot be function or type) reserved reserved reserved +DECIMAL non-reserved (cannot be function or type) reserved reserved reserved +DECLARE non-reserved reserved reserved reserved +DEFAULT reserved reserved reserved reserved +DEFAULTS non-reserved non-reserved non-reserved +DEFERRABLE reserved non-reserved non-reserved reserved +DEFERRED non-reserved non-reserved non-reserved reserved +DEFINED non-reserved non-reserved +DEFINER non-reserved non-reserved non-reserved +DEGREE non-reserved 
non-reserved +DELETE non-reserved reserved reserved reserved +DELIMITER non-reserved +DELIMITERS non-reserved +DENSE_RANK reserved reserved +DEPTH non-reserved non-reserved +DEREF reserved reserved +DERIVED non-reserved non-reserved +DESC reserved non-reserved non-reserved reserved +DESCRIBE reserved reserved reserved +DESCRIPTOR non-reserved non-reserved reserved +DETERMINISTIC reserved reserved +DIAGNOSTICS non-reserved non-reserved reserved +DICTIONARY non-reserved +DISABLE non-reserved +DISCARD non-reserved +DISCONNECT reserved reserved reserved +DISPATCH non-reserved non-reserved +DISTINCT reserved reserved reserved reserved +DLNEWCOPY reserved reserved +DLPREVIOUSCOPY reserved reserved +DLURLCOMPLETE reserved reserved +DLURLCOMPLETEONLY reserved reserved +DLURLCOMPLETEWRITE reserved reserved +DLURLPATH reserved reserved +DLURLPATHONLY reserved reserved +DLURLPATHWRITE reserved reserved +DLURLSCHEME reserved reserved +DLURLSERVER reserved reserved +DLVALUE reserved reserved +DO reserved +DOCUMENT non-reserved non-reserved non-reserved +DOMAIN non-reserved non-reserved non-reserved reserved +DOUBLE non-reserved reserved reserved reserved +DROP non-reserved reserved reserved reserved +DYNAMIC reserved reserved +DYNAMIC_FUNCTION non-reserved non-reserved non-reserved +DYNAMIC_FUNCTION_CODE non-reserved non-reserved +EACH non-reserved reserved reserved +ELEMENT reserved reserved +ELSE reserved reserved reserved reserved +EMPTY non-reserved non-reserved +ENABLE non-reserved +ENCODING non-reserved non-reserved non-reserved +ENCRYPTED non-reserved +END reserved reserved reserved reserved +END-EXEC reserved reserved reserved +END_FRAME reserved +END_PARTITION reserved +ENFORCED non-reserved +ENUM non-reserved +EQUALS reserved non-reserved +ESCAPE non-reserved reserved reserved reserved +EVENT non-reserved +EVERY reserved reserved +EXCEPT reserved reserved reserved reserved +EXCEPTION reserved +EXCLUDE non-reserved non-reserved non-reserved +EXCLUDING non-reserved non-reserved non-reserved +EXCLUSIVE non-reserved +EXEC reserved reserved reserved +EXECUTE non-reserved reserved reserved reserved +EXISTS non-reserved (cannot be function or type) reserved reserved reserved +EXP reserved reserved +EXPLAIN non-reserved +EXPRESSION non-reserved +EXTENSION non-reserved +EXTERNAL non-reserved reserved reserved reserved +EXTRACT non-reserved (cannot be function or type) reserved reserved reserved +FALSE reserved reserved reserved reserved +FAMILY non-reserved +FETCH reserved reserved reserved reserved +FILE non-reserved non-reserved +FILTER reserved reserved +FINAL non-reserved non-reserved +FIRST non-reserved non-reserved non-reserved reserved +FIRST_VALUE reserved reserved +FLAG non-reserved non-reserved +FLOAT non-reserved (cannot be function or type) reserved reserved reserved +FLOOR reserved reserved +FOLLOWING non-reserved non-reserved non-reserved +FOR reserved reserved reserved reserved +FORCE non-reserved +FOREIGN reserved reserved reserved reserved +FORTRAN non-reserved non-reserved non-reserved +FORWARD non-reserved +FOUND non-reserved non-reserved reserved +FRAME_ROW reserved +FREE reserved reserved +FREEZE reserved (can be function or type) +FROM reserved reserved reserved reserved +FS non-reserved non-reserved +FULL reserved (can be function or type) reserved reserved reserved +FUNCTION non-reserved reserved reserved +FUNCTIONS non-reserved +FUSION reserved reserved +G non-reserved non-reserved +GENERAL non-reserved non-reserved +GENERATED non-reserved non-reserved +GET reserved reserved 
reserved +GLOBAL non-reserved reserved reserved reserved +GO non-reserved non-reserved reserved +GOTO non-reserved non-reserved reserved +GRANT reserved reserved reserved reserved +GRANTED non-reserved non-reserved non-reserved +GREATEST non-reserved (cannot be function or type) +GROUP reserved reserved reserved reserved +GROUPING reserved reserved +GROUPS reserved +HANDLER non-reserved +HAVING reserved reserved reserved reserved +HEADER non-reserved +HEX non-reserved non-reserved +HIERARCHY non-reserved non-reserved +HOLD non-reserved reserved reserved +HOUR non-reserved reserved reserved reserved +ID non-reserved non-reserved +IDENTITY non-reserved reserved reserved reserved +IF non-reserved +IGNORE non-reserved non-reserved +ILIKE reserved (can be function or type) +IMMEDIATE non-reserved non-reserved non-reserved reserved +IMMEDIATELY non-reserved +IMMUTABLE non-reserved +IMPLEMENTATION non-reserved non-reserved +IMPLICIT non-reserved +IMPORT reserved reserved +IN reserved reserved reserved reserved +INCLUDING non-reserved non-reserved non-reserved +INCREMENT non-reserved non-reserved non-reserved +INDENT non-reserved non-reserved +INDEX non-reserved +INDEXES non-reserved +INDICATOR reserved reserved reserved +INHERIT non-reserved +INHERITS non-reserved +INITIALLY reserved non-reserved non-reserved reserved +INLINE non-reserved +INNER reserved (can be function or type) reserved reserved reserved +INOUT non-reserved (cannot be function or type) reserved reserved +INPUT non-reserved non-reserved non-reserved reserved +INSENSITIVE non-reserved reserved reserved reserved +INSERT non-reserved reserved reserved reserved +INSTANCE non-reserved non-reserved +INSTANTIABLE non-reserved non-reserved +INSTEAD non-reserved non-reserved non-reserved +INT non-reserved (cannot be function or type) reserved reserved reserved +INTEGER non-reserved (cannot be function or type) reserved reserved reserved +INTEGRITY non-reserved non-reserved +INTERSECT reserved reserved reserved reserved +INTERSECTION reserved reserved +INTERVAL non-reserved (cannot be function or type) reserved reserved reserved +INTO reserved reserved reserved reserved +INVOKER non-reserved non-reserved non-reserved +IS reserved (can be function or type) reserved reserved reserved +ISNULL reserved (can be function or type) +ISOLATION non-reserved non-reserved non-reserved reserved +JOIN reserved (can be function or type) reserved reserved reserved +K non-reserved non-reserved +KEY non-reserved non-reserved non-reserved reserved +KEY_MEMBER non-reserved non-reserved +KEY_TYPE non-reserved non-reserved +LABEL non-reserved +LAG reserved reserved +LANGUAGE non-reserved reserved reserved reserved +LARGE non-reserved reserved reserved +LAST non-reserved non-reserved non-reserved reserved +LAST_VALUE reserved reserved +LATERAL reserved reserved reserved +LC_COLLATE non-reserved +LC_CTYPE non-reserved +LEAD reserved reserved +LEADING reserved reserved reserved reserved +LEAKPROOF non-reserved +LEAST non-reserved (cannot be function or type) +LEFT reserved (can be function or type) reserved reserved reserved +LENGTH non-reserved non-reserved non-reserved +LEVEL non-reserved non-reserved non-reserved reserved +LIBRARY non-reserved non-reserved +LIKE reserved (can be function or type) reserved reserved reserved +LIKE_REGEX reserved reserved +LIMIT reserved non-reserved non-reserved +LINK non-reserved non-reserved +LISTEN non-reserved +LN reserved reserved +LOAD non-reserved +LOCAL non-reserved reserved reserved reserved +LOCALTIME reserved reserved 
reserved +LOCALTIMESTAMP reserved reserved reserved +LOCATION non-reserved non-reserved non-reserved +LOCATOR non-reserved non-reserved +LOCK non-reserved +LOWER reserved reserved reserved +M non-reserved non-reserved +MAP non-reserved non-reserved +MAPPING non-reserved non-reserved non-reserved +MATCH non-reserved reserved reserved reserved +MATCHED non-reserved non-reserved +MATERIALIZED non-reserved +MAX reserved reserved reserved +MAXVALUE non-reserved non-reserved non-reserved +MAX_CARDINALITY reserved +MEMBER reserved reserved +MERGE reserved reserved +MESSAGE_LENGTH non-reserved non-reserved non-reserved +MESSAGE_OCTET_LENGTH non-reserved non-reserved non-reserved +MESSAGE_TEXT non-reserved non-reserved non-reserved +METHOD reserved reserved +MIN reserved reserved reserved +MINUTE non-reserved reserved reserved reserved +MINVALUE non-reserved non-reserved non-reserved +MOD reserved reserved +MODE non-reserved +MODIFIES reserved reserved +MODULE reserved reserved reserved +MONTH non-reserved reserved reserved reserved +MORE non-reserved non-reserved non-reserved +MOVE non-reserved +MULTISET reserved reserved +MUMPS non-reserved non-reserved non-reserved +NAME non-reserved non-reserved non-reserved non-reserved +NAMES non-reserved non-reserved non-reserved reserved +NAMESPACE non-reserved non-reserved +NATIONAL non-reserved (cannot be function or type) reserved reserved reserved +NATURAL reserved (can be function or type) reserved reserved reserved +NCHAR non-reserved (cannot be function or type) reserved reserved reserved +NCLOB reserved reserved +NESTING non-reserved non-reserved +NEW reserved reserved +NEXT non-reserved non-reserved non-reserved reserved +NFC non-reserved non-reserved +NFD non-reserved non-reserved +NFKC non-reserved non-reserved +NFKD non-reserved non-reserved +NIL non-reserved non-reserved +NO non-reserved reserved reserved reserved +NONE non-reserved (cannot be function or type) reserved reserved +NORMALIZE reserved reserved +NORMALIZED non-reserved non-reserved +NOT reserved reserved reserved reserved +NOTHING non-reserved +NOTIFY non-reserved +NOTNULL reserved (can be function or type) +NOWAIT non-reserved +NTH_VALUE reserved reserved +NTILE reserved reserved +NULL reserved reserved reserved reserved +NULLABLE non-reserved non-reserved non-reserved +NULLIF non-reserved (cannot be function or type) reserved reserved reserved +NULLS non-reserved non-reserved non-reserved +NUMBER non-reserved non-reserved non-reserved +NUMERIC non-reserved (cannot be function or type) reserved reserved reserved +OBJECT non-reserved non-reserved non-reserved +OCCURRENCES_REGEX reserved reserved +OCTETS non-reserved non-reserved +OCTET_LENGTH reserved reserved reserved +OF non-reserved reserved reserved reserved +OFF non-reserved non-reserved non-reserved +OFFSET reserved reserved reserved +OIDS non-reserved +OLD reserved reserved +ON reserved reserved reserved reserved +ONLY reserved reserved reserved reserved +OPEN reserved reserved reserved +OPERATOR non-reserved +OPTION non-reserved non-reserved non-reserved reserved +OPTIONS non-reserved non-reserved non-reserved +OR reserved reserved reserved reserved +ORDER reserved reserved reserved reserved +ORDERING non-reserved non-reserved +ORDINALITY non-reserved non-reserved +OTHERS non-reserved non-reserved +OUT non-reserved (cannot be function or type) reserved reserved +OUTER reserved (can be function or type) reserved reserved reserved +OUTPUT non-reserved non-reserved reserved +OVER reserved (can be function or type) reserved 
reserved +OVERLAPS reserved (can be function or type) reserved reserved reserved +OVERLAY non-reserved (cannot be function or type) reserved reserved +OVERRIDING non-reserved non-reserved +OWNED non-reserved +OWNER non-reserved +P non-reserved non-reserved +PAD non-reserved non-reserved reserved +PARAMETER reserved reserved +PARAMETER_MODE non-reserved non-reserved +PARAMETER_NAME non-reserved non-reserved +PARAMETER_ORDINAL_POSITION non-reserved non-reserved +PARAMETER_SPECIFIC_CATALOG non-reserved non-reserved +PARAMETER_SPECIFIC_NAME non-reserved non-reserved +PARAMETER_SPECIFIC_SCHEMA non-reserved non-reserved +PARSER non-reserved +PARTIAL non-reserved non-reserved non-reserved reserved +PARTITION non-reserved reserved reserved +PASCAL non-reserved non-reserved non-reserved +PASSING non-reserved non-reserved non-reserved +PASSTHROUGH non-reserved non-reserved +PASSWORD non-reserved +PATH non-reserved non-reserved +PERCENT reserved +PERCENTILE_CONT reserved reserved +PERCENTILE_DISC reserved reserved +PERCENT_RANK reserved reserved +PERIOD reserved +PERMISSION non-reserved non-reserved +PLACING reserved non-reserved non-reserved +PLANS non-reserved +PLI non-reserved non-reserved non-reserved +PORTION reserved +POSITION non-reserved (cannot be function or type) reserved reserved reserved +POSITION_REGEX reserved reserved +POWER reserved reserved +PRECEDES reserved +PRECEDING non-reserved non-reserved non-reserved +PRECISION non-reserved (cannot be function or type) reserved reserved reserved +PREPARE non-reserved reserved reserved reserved +PREPARED non-reserved +PRESERVE non-reserved non-reserved non-reserved reserved +PRIMARY reserved reserved reserved reserved +PRIOR non-reserved non-reserved non-reserved reserved +PRIVILEGES non-reserved non-reserved non-reserved reserved +PROCEDURAL non-reserved +PROCEDURE non-reserved reserved reserved reserved +PROGRAM non-reserved +PUBLIC non-reserved non-reserved reserved +QUOTE non-reserved +RANGE non-reserved reserved reserved +RANK reserved reserved +READ non-reserved non-reserved non-reserved reserved +READS reserved reserved +REAL non-reserved (cannot be function or type) reserved reserved reserved +REASSIGN non-reserved +RECHECK non-reserved +RECOVERY non-reserved non-reserved +RECURSIVE non-reserved reserved reserved +REF non-reserved reserved reserved +REFERENCES reserved reserved reserved reserved +REFERENCING reserved reserved +REFRESH non-reserved +REGR_AVGX reserved reserved +REGR_AVGY reserved reserved +REGR_COUNT reserved reserved +REGR_INTERCEPT reserved reserved +REGR_R2 reserved reserved +REGR_SLOPE reserved reserved +REGR_SXX reserved reserved +REGR_SXY reserved reserved +REGR_SYY reserved reserved +REINDEX non-reserved +RELATIVE non-reserved non-reserved non-reserved reserved +RELEASE non-reserved reserved reserved +RENAME non-reserved +REPEATABLE non-reserved non-reserved non-reserved non-reserved +REPLACE non-reserved +REPLICA non-reserved +REQUIRING non-reserved non-reserved +RESET non-reserved +RESPECT non-reserved non-reserved +RESTART non-reserved non-reserved non-reserved +RESTORE non-reserved non-reserved +RESTRICT non-reserved non-reserved non-reserved reserved +RESULT reserved reserved +RETURN reserved reserved +RETURNED_CARDINALITY non-reserved non-reserved +RETURNED_LENGTH non-reserved non-reserved non-reserved +RETURNED_OCTET_LENGTH non-reserved non-reserved non-reserved +RETURNED_SQLSTATE non-reserved non-reserved non-reserved +RETURNING reserved non-reserved non-reserved +RETURNS non-reserved reserved reserved 
+REVOKE non-reserved reserved reserved reserved +RIGHT reserved (can be function or type) reserved reserved reserved +ROLE non-reserved non-reserved non-reserved +ROLLBACK non-reserved reserved reserved reserved +ROLLUP reserved reserved +ROUTINE non-reserved non-reserved +ROUTINE_CATALOG non-reserved non-reserved +ROUTINE_NAME non-reserved non-reserved +ROUTINE_SCHEMA non-reserved non-reserved +ROW non-reserved (cannot be function or type) reserved reserved +ROWS non-reserved reserved reserved reserved +ROW_COUNT non-reserved non-reserved non-reserved +ROW_NUMBER reserved reserved +RULE non-reserved +SAVEPOINT non-reserved reserved reserved +SCALE non-reserved non-reserved non-reserved +SCHEMA non-reserved non-reserved non-reserved reserved +SCHEMA_NAME non-reserved non-reserved non-reserved +SCOPE reserved reserved +SCOPE_CATALOG non-reserved non-reserved +SCOPE_NAME non-reserved non-reserved +SCOPE_SCHEMA non-reserved non-reserved +SCROLL non-reserved reserved reserved reserved +SEARCH non-reserved reserved reserved +SECOND non-reserved reserved reserved reserved +SECTION non-reserved non-reserved reserved +SECURITY non-reserved non-reserved non-reserved +SELECT reserved reserved reserved reserved +SELECTIVE non-reserved non-reserved +SELF non-reserved non-reserved +SENSITIVE reserved reserved +SEQUENCE non-reserved non-reserved non-reserved +SEQUENCES non-reserved +SERIALIZABLE non-reserved non-reserved non-reserved non-reserved +SERVER non-reserved non-reserved non-reserved +SERVER_NAME non-reserved non-reserved non-reserved +SESSION non-reserved non-reserved non-reserved reserved +SESSION_USER reserved reserved reserved reserved +SET non-reserved reserved reserved reserved +SETOF non-reserved (cannot be function or type) +SETS non-reserved non-reserved +SHARE non-reserved +SHOW non-reserved +SIMILAR reserved (can be function or type) reserved reserved +SIMPLE non-reserved non-reserved non-reserved +SIZE non-reserved non-reserved reserved +SMALLINT non-reserved (cannot be function or type) reserved reserved reserved +SNAPSHOT non-reserved +SOME reserved reserved reserved reserved +SOURCE non-reserved non-reserved +SPACE non-reserved non-reserved reserved +SPECIFIC reserved reserved +SPECIFICTYPE reserved reserved +SPECIFIC_NAME non-reserved non-reserved +SQL reserved reserved reserved +SQLCODE reserved +SQLERROR reserved +SQLEXCEPTION reserved reserved +SQLSTATE reserved reserved reserved +SQLWARNING reserved reserved +SQRT reserved reserved +STABLE non-reserved +STANDALONE non-reserved non-reserved non-reserved +START non-reserved reserved reserved +STATE non-reserved non-reserved +STATEMENT non-reserved non-reserved non-reserved +STATIC reserved reserved +STATISTICS non-reserved +STDDEV_POP reserved reserved +STDDEV_SAMP reserved reserved +STDIN non-reserved +STDOUT non-reserved +STORAGE non-reserved +STRICT non-reserved +STRIP non-reserved non-reserved non-reserved +STRUCTURE non-reserved non-reserved +STYLE non-reserved non-reserved +SUBCLASS_ORIGIN non-reserved non-reserved non-reserved +SUBMULTISET reserved reserved +SUBSTRING non-reserved (cannot be function or type) reserved reserved reserved +SUBSTRING_REGEX reserved reserved +SUCCEEDS reserved +SUM reserved reserved reserved +SYMMETRIC reserved reserved reserved +SYSID non-reserved +SYSTEM non-reserved reserved reserved +SYSTEM_TIME reserved +SYSTEM_USER reserved reserved reserved +T non-reserved non-reserved +TABLE reserved reserved reserved reserved +TABLES non-reserved +TABLESAMPLE reserved reserved +TABLESPACE 
non-reserved +TABLE_NAME non-reserved non-reserved non-reserved +TEMP non-reserved +TEMPLATE non-reserved +TEMPORARY non-reserved non-reserved non-reserved reserved +TEXT non-reserved +THEN reserved reserved reserved reserved +TIES non-reserved non-reserved +TIME non-reserved (cannot be function or type) reserved reserved reserved +TIMESTAMP non-reserved (cannot be function or type) reserved reserved reserved +TIMEZONE_HOUR reserved reserved reserved +TIMEZONE_MINUTE reserved reserved reserved +TO reserved reserved reserved reserved +TOKEN non-reserved non-reserved +TOP_LEVEL_COUNT non-reserved non-reserved +TRAILING reserved reserved reserved reserved +TRANSACTION non-reserved non-reserved non-reserved reserved +TRANSACTIONS_COMMITTED non-reserved non-reserved +TRANSACTIONS_ROLLED_BACK non-reserved non-reserved +TRANSACTION_ACTIVE non-reserved non-reserved +TRANSFORM non-reserved non-reserved +TRANSFORMS non-reserved non-reserved +TRANSLATE reserved reserved reserved +TRANSLATE_REGEX reserved reserved +TRANSLATION reserved reserved reserved +TREAT non-reserved (cannot be function or type) reserved reserved +TRIGGER non-reserved reserved reserved +TRIGGER_CATALOG non-reserved non-reserved +TRIGGER_NAME non-reserved non-reserved +TRIGGER_SCHEMA non-reserved non-reserved +TRIM non-reserved (cannot be function or type) reserved reserved reserved +TRIM_ARRAY reserved reserved +TRUE reserved reserved reserved reserved +TRUNCATE non-reserved reserved reserved +TRUSTED non-reserved +TYPE non-reserved non-reserved non-reserved non-reserved +TYPES non-reserved +UESCAPE reserved reserved +UNBOUNDED non-reserved non-reserved non-reserved +UNCOMMITTED non-reserved non-reserved non-reserved non-reserved +UNDER non-reserved non-reserved +UNENCRYPTED non-reserved +UNION reserved reserved reserved reserved +UNIQUE reserved reserved reserved reserved +UNKNOWN non-reserved reserved reserved reserved +UNLINK non-reserved non-reserved +UNLISTEN non-reserved +UNLOGGED non-reserved +UNNAMED non-reserved non-reserved non-reserved +UNNEST reserved reserved +UNTIL non-reserved +UNTYPED non-reserved non-reserved +UPDATE non-reserved reserved reserved reserved +UPPER reserved reserved reserved +URI non-reserved non-reserved +USAGE non-reserved non-reserved reserved +USER reserved reserved reserved reserved +USER_DEFINED_TYPE_CATALOG non-reserved non-reserved +USER_DEFINED_TYPE_CODE non-reserved non-reserved +USER_DEFINED_TYPE_NAME non-reserved non-reserved +USER_DEFINED_TYPE_SCHEMA non-reserved non-reserved +USING reserved reserved reserved reserved +VACUUM non-reserved +VALID non-reserved non-reserved non-reserved +VALIDATE non-reserved +VALIDATOR non-reserved +VALUE non-reserved reserved reserved reserved +VALUES non-reserved (cannot be function or type) reserved reserved reserved +VALUE_OF reserved +VARBINARY reserved reserved +VARCHAR non-reserved (cannot be function or type) reserved reserved reserved +VARIADIC reserved +VARYING non-reserved reserved reserved reserved +VAR_POP reserved reserved +VAR_SAMP reserved reserved +VERBOSE reserved (can be function or type) +VERSION non-reserved non-reserved non-reserved +VERSIONING reserved +VIEW non-reserved non-reserved non-reserved reserved +VOLATILE non-reserved +WHEN reserved reserved reserved reserved +WHENEVER reserved reserved reserved +WHERE reserved reserved reserved reserved +WHITESPACE non-reserved non-reserved non-reserved +WIDTH_BUCKET reserved reserved +WINDOW reserved reserved reserved +WITH reserved reserved reserved reserved +WITHIN reserved reserved 
+WITHOUT non-reserved reserved reserved +WORK non-reserved non-reserved non-reserved reserved +WRAPPER non-reserved non-reserved non-reserved +WRITE non-reserved non-reserved non-reserved reserved +XML non-reserved reserved reserved +XMLAGG reserved reserved +XMLATTRIBUTES non-reserved (cannot be function or type) reserved reserved +XMLBINARY reserved reserved +XMLCAST reserved reserved +XMLCOMMENT reserved reserved +XMLCONCAT non-reserved (cannot be function or type) reserved reserved +XMLDECLARATION non-reserved non-reserved +XMLDOCUMENT reserved reserved +XMLELEMENT non-reserved (cannot be function or type) reserved reserved +XMLEXISTS non-reserved (cannot be function or type) reserved reserved +XMLFOREST non-reserved (cannot be function or type) reserved reserved +XMLITERATE reserved reserved +XMLNAMESPACES reserved reserved +XMLPARSE non-reserved (cannot be function or type) reserved reserved +XMLPI non-reserved (cannot be function or type) reserved reserved +XMLQUERY reserved reserved +XMLROOT non-reserved (cannot be function or type) +XMLSCHEMA non-reserved non-reserved +XMLSERIALIZE non-reserved (cannot be function or type) reserved reserved +XMLTABLE reserved reserved +XMLTEXT reserved reserved +XMLVALIDATE reserved reserved +YEAR non-reserved reserved reserved reserved +YES non-reserved non-reserved non-reserved +ZONE non-reserved non-reserved non-reserved reserved \ No newline at end of file diff --git a/vendor/xorm.io/xorm/dialects/postgres.go b/vendor/xorm.io/xorm/dialects/postgres.go new file mode 100644 index 0000000..1996c49 --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/postgres.go @@ -0,0 +1,1349 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
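+
+// The pqDriver defined near the end of this file accepts either URL-style
+// DSNs or space-separated key=value options. As an illustrative sketch
+// (credentials and host are made up), both calls below resolve to a URI
+// with DBName "mydb":
+//
+//	uri, _ := (&pqDriver{}).Parse("postgres", "postgres://user:secret@localhost:5432/mydb?sslmode=disable")
+//	uri, _ = (&pqDriver{}).Parse("postgres", "user=user password=secret dbname=mydb sslmode=disable")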
+ +package dialects + +import ( + "context" + "errors" + "fmt" + "net/url" + "strconv" + "strings" + + "xorm.io/xorm/core" + "xorm.io/xorm/schemas" +) + +// from http://www.postgresql.org/docs/current/static/sql-keywords-appendix.html +var ( + postgresReservedWords = map[string]bool{ + "A": true, + "ABORT": true, + "ABS": true, + "ABSENT": true, + "ABSOLUTE": true, + "ACCESS": true, + "ACCORDING": true, + "ACTION": true, + "ADA": true, + "ADD": true, + "ADMIN": true, + "AFTER": true, + "AGGREGATE": true, + "ALL": true, + "ALLOCATE": true, + "ALSO": true, + "ALTER": true, + "ALWAYS": true, + "ANALYSE": true, + "ANALYZE": true, + "AND": true, + "ANY": true, + "ARE": true, + "ARRAY": true, + "ARRAY_AGG": true, + "ARRAY_MAX_CARDINALITY": true, + "AS": true, + "ASC": true, + "ASENSITIVE": true, + "ASSERTION": true, + "ASSIGNMENT": true, + "ASYMMETRIC": true, + "AT": true, + "ATOMIC": true, + "ATTRIBUTE": true, + "ATTRIBUTES": true, + "AUTHORIZATION": true, + "AVG": true, + "BACKWARD": true, + "BASE64": true, + "BEFORE": true, + "BEGIN": true, + "BEGIN_FRAME": true, + "BEGIN_PARTITION": true, + "BERNOULLI": true, + "BETWEEN": true, + "BIGINT": true, + "BINARY": true, + "BIT": true, + "BIT_LENGTH": true, + "BLOB": true, + "BLOCKED": true, + "BOM": true, + "BOOLEAN": true, + "BOTH": true, + "BREADTH": true, + "BY": true, + "C": true, + "CACHE": true, + "CALL": true, + "CALLED": true, + "CARDINALITY": true, + "CASCADE": true, + "CASCADED": true, + "CASE": true, + "CAST": true, + "CATALOG": true, + "CATALOG_NAME": true, + "CEIL": true, + "CEILING": true, + "CHAIN": true, + "CHAR": true, + "CHARACTER": true, + "CHARACTERISTICS": true, + "CHARACTERS": true, + "CHARACTER_LENGTH": true, + "CHARACTER_SET_CATALOG": true, + "CHARACTER_SET_NAME": true, + "CHARACTER_SET_SCHEMA": true, + "CHAR_LENGTH": true, + "CHECK": true, + "CHECKPOINT": true, + "CLASS": true, + "CLASS_ORIGIN": true, + "CLOB": true, + "CLOSE": true, + "CLUSTER": true, + "COALESCE": true, + "COBOL": true, + "COLLATE": true, + "COLLATION": true, + "COLLATION_CATALOG": true, + "COLLATION_NAME": true, + "COLLATION_SCHEMA": true, + "COLLECT": true, + "COLUMN": true, + "COLUMNS": true, + "COLUMN_NAME": true, + "COMMAND_FUNCTION": true, + "COMMAND_FUNCTION_CODE": true, + "COMMENT": true, + "COMMENTS": true, + "COMMIT": true, + "COMMITTED": true, + "CONCURRENTLY": true, + "CONDITION": true, + "CONDITION_NUMBER": true, + "CONFIGURATION": true, + "CONNECT": true, + "CONNECTION": true, + "CONNECTION_NAME": true, + "CONSTRAINT": true, + "CONSTRAINTS": true, + "CONSTRAINT_CATALOG": true, + "CONSTRAINT_NAME": true, + "CONSTRAINT_SCHEMA": true, + "CONSTRUCTOR": true, + "CONTAINS": true, + "CONTENT": true, + "CONTINUE": true, + "CONTROL": true, + "CONVERSION": true, + "CONVERT": true, + "COPY": true, + "CORR": true, + "CORRESPONDING": true, + "COST": true, + "COUNT": true, + "COVAR_POP": true, + "COVAR_SAMP": true, + "CREATE": true, + "CROSS": true, + "CSV": true, + "CUBE": true, + "CUME_DIST": true, + "CURRENT": true, + "CURRENT_CATALOG": true, + "CURRENT_DATE": true, + "CURRENT_DEFAULT_TRANSFORM_GROUP": true, + "CURRENT_PATH": true, + "CURRENT_ROLE": true, + "CURRENT_ROW": true, + "CURRENT_SCHEMA": true, + "CURRENT_TIME": true, + "CURRENT_TIMESTAMP": true, + "CURRENT_TRANSFORM_GROUP_FOR_TYPE": true, + "CURRENT_USER": true, + "CURSOR": true, + "CURSOR_NAME": true, + "CYCLE": true, + "DATA": true, + "DATABASE": true, + "DATALINK": true, + "DATE": true, + "DATETIME_INTERVAL_CODE": true, + "DATETIME_INTERVAL_PRECISION": true, + "DAY": true, + "DB": true, + 
"DEALLOCATE": true, + "DEC": true, + "DECIMAL": true, + "DECLARE": true, + "DEFAULT": true, + "DEFAULTS": true, + "DEFERRABLE": true, + "DEFERRED": true, + "DEFINED": true, + "DEFINER": true, + "DEGREE": true, + "DELETE": true, + "DELIMITER": true, + "DELIMITERS": true, + "DENSE_RANK": true, + "DEPTH": true, + "DEREF": true, + "DERIVED": true, + "DESC": true, + "DESCRIBE": true, + "DESCRIPTOR": true, + "DETERMINISTIC": true, + "DIAGNOSTICS": true, + "DICTIONARY": true, + "DISABLE": true, + "DISCARD": true, + "DISCONNECT": true, + "DISPATCH": true, + "DISTINCT": true, + "DLNEWCOPY": true, + "DLPREVIOUSCOPY": true, + "DLURLCOMPLETE": true, + "DLURLCOMPLETEONLY": true, + "DLURLCOMPLETEWRITE": true, + "DLURLPATH": true, + "DLURLPATHONLY": true, + "DLURLPATHWRITE": true, + "DLURLSCHEME": true, + "DLURLSERVER": true, + "DLVALUE": true, + "DO": true, + "DOCUMENT": true, + "DOMAIN": true, + "DOUBLE": true, + "DROP": true, + "DYNAMIC": true, + "DYNAMIC_FUNCTION": true, + "DYNAMIC_FUNCTION_CODE": true, + "EACH": true, + "ELEMENT": true, + "ELSE": true, + "EMPTY": true, + "ENABLE": true, + "ENCODING": true, + "ENCRYPTED": true, + "END": true, + "END-EXEC": true, + "END_FRAME": true, + "END_PARTITION": true, + "ENFORCED": true, + "ENUM": true, + "EQUALS": true, + "ESCAPE": true, + "EVENT": true, + "EVERY": true, + "EXCEPT": true, + "EXCEPTION": true, + "EXCLUDE": true, + "EXCLUDING": true, + "EXCLUSIVE": true, + "EXEC": true, + "EXECUTE": true, + "EXISTS": true, + "EXP": true, + "EXPLAIN": true, + "EXPRESSION": true, + "EXTENSION": true, + "EXTERNAL": true, + "EXTRACT": true, + "FALSE": true, + "FAMILY": true, + "FETCH": true, + "FILE": true, + "FILTER": true, + "FINAL": true, + "FIRST": true, + "FIRST_VALUE": true, + "FLAG": true, + "FLOAT": true, + "FLOOR": true, + "FOLLOWING": true, + "FOR": true, + "FORCE": true, + "FOREIGN": true, + "FORTRAN": true, + "FORWARD": true, + "FOUND": true, + "FRAME_ROW": true, + "FREE": true, + "FREEZE": true, + "FROM": true, + "FS": true, + "FULL": true, + "FUNCTION": true, + "FUNCTIONS": true, + "FUSION": true, + "G": true, + "GENERAL": true, + "GENERATED": true, + "GET": true, + "GLOBAL": true, + "GO": true, + "GOTO": true, + "GRANT": true, + "GRANTED": true, + "GREATEST": true, + "GROUP": true, + "GROUPING": true, + "GROUPS": true, + "HANDLER": true, + "HAVING": true, + "HEADER": true, + "HEX": true, + "HIERARCHY": true, + "HOLD": true, + "HOUR": true, + "ID": true, + "IDENTITY": true, + "IF": true, + "IGNORE": true, + "ILIKE": true, + "IMMEDIATE": true, + "IMMEDIATELY": true, + "IMMUTABLE": true, + "IMPLEMENTATION": true, + "IMPLICIT": true, + "IMPORT": true, + "IN": true, + "INCLUDING": true, + "INCREMENT": true, + "INDENT": true, + "INDEX": true, + "INDEXES": true, + "INDICATOR": true, + "INHERIT": true, + "INHERITS": true, + "INITIALLY": true, + "INLINE": true, + "INNER": true, + "INOUT": true, + "INPUT": true, + "INSENSITIVE": true, + "INSERT": true, + "INSTANCE": true, + "INSTANTIABLE": true, + "INSTEAD": true, + "INT": true, + "INTEGER": true, + "INTEGRITY": true, + "INTERSECT": true, + "INTERSECTION": true, + "INTERVAL": true, + "INTO": true, + "INVOKER": true, + "IS": true, + "ISNULL": true, + "ISOLATION": true, + "JOIN": true, + "K": true, + "KEY": true, + "KEY_MEMBER": true, + "KEY_TYPE": true, + "LABEL": true, + "LAG": true, + "LANGUAGE": true, + "LARGE": true, + "LAST": true, + "LAST_VALUE": true, + "LATERAL": true, + "LC_COLLATE": true, + "LC_CTYPE": true, + "LEAD": true, + "LEADING": true, + "LEAKPROOF": true, + "LEAST": true, + "LEFT": true, + 
"LENGTH": true, + "LEVEL": true, + "LIBRARY": true, + "LIKE": true, + "LIKE_REGEX": true, + "LIMIT": true, + "LINK": true, + "LISTEN": true, + "LN": true, + "LOAD": true, + "LOCAL": true, + "LOCALTIME": true, + "LOCALTIMESTAMP": true, + "LOCATION": true, + "LOCATOR": true, + "LOCK": true, + "LOWER": true, + "M": true, + "MAP": true, + "MAPPING": true, + "MATCH": true, + "MATCHED": true, + "MATERIALIZED": true, + "MAX": true, + "MAXVALUE": true, + "MAX_CARDINALITY": true, + "MEMBER": true, + "MERGE": true, + "MESSAGE_LENGTH": true, + "MESSAGE_OCTET_LENGTH": true, + "MESSAGE_TEXT": true, + "METHOD": true, + "MIN": true, + "MINUTE": true, + "MINVALUE": true, + "MOD": true, + "MODE": true, + "MODIFIES": true, + "MODULE": true, + "MONTH": true, + "MORE": true, + "MOVE": true, + "MULTISET": true, + "MUMPS": true, + "NAME": true, + "NAMES": true, + "NAMESPACE": true, + "NATIONAL": true, + "NATURAL": true, + "NCHAR": true, + "NCLOB": true, + "NESTING": true, + "NEW": true, + "NEXT": true, + "NFC": true, + "NFD": true, + "NFKC": true, + "NFKD": true, + "NIL": true, + "NO": true, + "NONE": true, + "NORMALIZE": true, + "NORMALIZED": true, + "NOT": true, + "NOTHING": true, + "NOTIFY": true, + "NOTNULL": true, + "NOWAIT": true, + "NTH_VALUE": true, + "NTILE": true, + "NULL": true, + "NULLABLE": true, + "NULLIF": true, + "NULLS": true, + "NUMBER": true, + "NUMERIC": true, + "OBJECT": true, + "OCCURRENCES_REGEX": true, + "OCTETS": true, + "OCTET_LENGTH": true, + "OF": true, + "OFF": true, + "OFFSET": true, + "OIDS": true, + "OLD": true, + "ON": true, + "ONLY": true, + "OPEN": true, + "OPERATOR": true, + "OPTION": true, + "OPTIONS": true, + "OR": true, + "ORDER": true, + "ORDERING": true, + "ORDINALITY": true, + "OTHERS": true, + "OUT": true, + "OUTER": true, + "OUTPUT": true, + "OVER": true, + "OVERLAPS": true, + "OVERLAY": true, + "OVERRIDING": true, + "OWNED": true, + "OWNER": true, + "P": true, + "PAD": true, + "PARAMETER": true, + "PARAMETER_MODE": true, + "PARAMETER_NAME": true, + "PARAMETER_ORDINAL_POSITION": true, + "PARAMETER_SPECIFIC_CATALOG": true, + "PARAMETER_SPECIFIC_NAME": true, + "PARAMETER_SPECIFIC_SCHEMA": true, + "PARSER": true, + "PARTIAL": true, + "PARTITION": true, + "PASCAL": true, + "PASSING": true, + "PASSTHROUGH": true, + "PASSWORD": true, + "PATH": true, + "PERCENT": true, + "PERCENTILE_CONT": true, + "PERCENTILE_DISC": true, + "PERCENT_RANK": true, + "PERIOD": true, + "PERMISSION": true, + "PLACING": true, + "PLANS": true, + "PLI": true, + "PORTION": true, + "POSITION": true, + "POSITION_REGEX": true, + "POWER": true, + "PRECEDES": true, + "PRECEDING": true, + "PRECISION": true, + "PREPARE": true, + "PREPARED": true, + "PRESERVE": true, + "PRIMARY": true, + "PRIOR": true, + "PRIVILEGES": true, + "PROCEDURAL": true, + "PROCEDURE": true, + "PROGRAM": true, + "PUBLIC": true, + "QUOTE": true, + "RANGE": true, + "RANK": true, + "READ": true, + "READS": true, + "REAL": true, + "REASSIGN": true, + "RECHECK": true, + "RECOVERY": true, + "RECURSIVE": true, + "REF": true, + "REFERENCES": true, + "REFERENCING": true, + "REFRESH": true, + "REGR_AVGX": true, + "REGR_AVGY": true, + "REGR_COUNT": true, + "REGR_INTERCEPT": true, + "REGR_R2": true, + "REGR_SLOPE": true, + "REGR_SXX": true, + "REGR_SXY": true, + "REGR_SYY": true, + "REINDEX": true, + "RELATIVE": true, + "RELEASE": true, + "RENAME": true, + "REPEATABLE": true, + "REPLACE": true, + "REPLICA": true, + "REQUIRING": true, + "RESET": true, + "RESPECT": true, + "RESTART": true, + "RESTORE": true, + "RESTRICT": true, + "RESULT": true, + 
"RETURN": true, + "RETURNED_CARDINALITY": true, + "RETURNED_LENGTH": true, + "RETURNED_OCTET_LENGTH": true, + "RETURNED_SQLSTATE": true, + "RETURNING": true, + "RETURNS": true, + "REVOKE": true, + "RIGHT": true, + "ROLE": true, + "ROLLBACK": true, + "ROLLUP": true, + "ROUTINE": true, + "ROUTINE_CATALOG": true, + "ROUTINE_NAME": true, + "ROUTINE_SCHEMA": true, + "ROW": true, + "ROWS": true, + "ROW_COUNT": true, + "ROW_NUMBER": true, + "RULE": true, + "SAVEPOINT": true, + "SCALE": true, + "SCHEMA": true, + "SCHEMA_NAME": true, + "SCOPE": true, + "SCOPE_CATALOG": true, + "SCOPE_NAME": true, + "SCOPE_SCHEMA": true, + "SCROLL": true, + "SEARCH": true, + "SECOND": true, + "SECTION": true, + "SECURITY": true, + "SELECT": true, + "SELECTIVE": true, + "SELF": true, + "SENSITIVE": true, + "SEQUENCE": true, + "SEQUENCES": true, + "SERIALIZABLE": true, + "SERVER": true, + "SERVER_NAME": true, + "SESSION": true, + "SESSION_USER": true, + "SET": true, + "SETOF": true, + "SETS": true, + "SHARE": true, + "SHOW": true, + "SIMILAR": true, + "SIMPLE": true, + "SIZE": true, + "SMALLINT": true, + "SNAPSHOT": true, + "SOME": true, + "SOURCE": true, + "SPACE": true, + "SPECIFIC": true, + "SPECIFICTYPE": true, + "SPECIFIC_NAME": true, + "SQL": true, + "SQLCODE": true, + "SQLERROR": true, + "SQLEXCEPTION": true, + "SQLSTATE": true, + "SQLWARNING": true, + "SQRT": true, + "STABLE": true, + "STANDALONE": true, + "START": true, + "STATE": true, + "STATEMENT": true, + "STATIC": true, + "STATISTICS": true, + "STDDEV_POP": true, + "STDDEV_SAMP": true, + "STDIN": true, + "STDOUT": true, + "STORAGE": true, + "STRICT": true, + "STRIP": true, + "STRUCTURE": true, + "STYLE": true, + "SUBCLASS_ORIGIN": true, + "SUBMULTISET": true, + "SUBSTRING": true, + "SUBSTRING_REGEX": true, + "SUCCEEDS": true, + "SUM": true, + "SYMMETRIC": true, + "SYSID": true, + "SYSTEM": true, + "SYSTEM_TIME": true, + "SYSTEM_USER": true, + "T": true, + "TABLE": true, + "TABLES": true, + "TABLESAMPLE": true, + "TABLESPACE": true, + "TABLE_NAME": true, + "TEMP": true, + "TEMPLATE": true, + "TEMPORARY": true, + "TEXT": true, + "THEN": true, + "TIES": true, + "TIME": true, + "TIMESTAMP": true, + "TIMEZONE_HOUR": true, + "TIMEZONE_MINUTE": true, + "TO": true, + "TOKEN": true, + "TOP_LEVEL_COUNT": true, + "TRAILING": true, + "TRANSACTION": true, + "TRANSACTIONS_COMMITTED": true, + "TRANSACTIONS_ROLLED_BACK": true, + "TRANSACTION_ACTIVE": true, + "TRANSFORM": true, + "TRANSFORMS": true, + "TRANSLATE": true, + "TRANSLATE_REGEX": true, + "TRANSLATION": true, + "TREAT": true, + "TRIGGER": true, + "TRIGGER_CATALOG": true, + "TRIGGER_NAME": true, + "TRIGGER_SCHEMA": true, + "TRIM": true, + "TRIM_ARRAY": true, + "TRUE": true, + "TRUNCATE": true, + "TRUSTED": true, + "TYPE": true, + "TYPES": true, + "UESCAPE": true, + "UNBOUNDED": true, + "UNCOMMITTED": true, + "UNDER": true, + "UNENCRYPTED": true, + "UNION": true, + "UNIQUE": true, + "UNKNOWN": true, + "UNLINK": true, + "UNLISTEN": true, + "UNLOGGED": true, + "UNNAMED": true, + "UNNEST": true, + "UNTIL": true, + "UNTYPED": true, + "UPDATE": true, + "UPPER": true, + "URI": true, + "USAGE": true, + "USER": true, + "USER_DEFINED_TYPE_CATALOG": true, + "USER_DEFINED_TYPE_CODE": true, + "USER_DEFINED_TYPE_NAME": true, + "USER_DEFINED_TYPE_SCHEMA": true, + "USING": true, + "VACUUM": true, + "VALID": true, + "VALIDATE": true, + "VALIDATOR": true, + "VALUE": true, + "VALUES": true, + "VALUE_OF": true, + "VARBINARY": true, + "VARCHAR": true, + "VARIADIC": true, + "VARYING": true, + "VAR_POP": true, + "VAR_SAMP": true, + 
"VERBOSE": true, + "VERSION": true, + "VERSIONING": true, + "VIEW": true, + "VOLATILE": true, + "WHEN": true, + "WHENEVER": true, + "WHERE": true, + "WHITESPACE": true, + "WIDTH_BUCKET": true, + "WINDOW": true, + "WITH": true, + "WITHIN": true, + "WITHOUT": true, + "WORK": true, + "WRAPPER": true, + "WRITE": true, + "XML": true, + "XMLAGG": true, + "XMLATTRIBUTES": true, + "XMLBINARY": true, + "XMLCAST": true, + "XMLCOMMENT": true, + "XMLCONCAT": true, + "XMLDECLARATION": true, + "XMLDOCUMENT": true, + "XMLELEMENT": true, + "XMLEXISTS": true, + "XMLFOREST": true, + "XMLITERATE": true, + "XMLNAMESPACES": true, + "XMLPARSE": true, + "XMLPI": true, + "XMLQUERY": true, + "XMLROOT": true, + "XMLSCHEMA": true, + "XMLSERIALIZE": true, + "XMLTABLE": true, + "XMLTEXT": true, + "XMLVALIDATE": true, + "YEAR": true, + "YES": true, + "ZONE": true, + } + + postgresQuoter = schemas.Quoter{ + Prefix: '"', + Suffix: '"', + IsReserved: schemas.AlwaysReserve, + } +) + +var ( + // DefaultPostgresSchema default postgres schema + DefaultPostgresSchema = "public" +) + +type postgres struct { + Base +} + +func (db *postgres) Init(uri *URI) error { + db.quoter = postgresQuoter + return db.Base.Init(db, uri) +} + +func (db *postgres) getSchema() string { + if db.uri.Schema != "" { + return db.uri.Schema + } + return DefaultPostgresSchema +} + +func (db *postgres) needQuote(name string) bool { + if db.IsReserved(name) { + return true + } + for _, c := range name { + if c >= 'A' && c <= 'Z' { + return true + } + } + return false +} + +func (db *postgres) SetQuotePolicy(quotePolicy QuotePolicy) { + switch quotePolicy { + case QuotePolicyNone: + var q = postgresQuoter + q.IsReserved = schemas.AlwaysNoReserve + db.quoter = q + case QuotePolicyReserved: + var q = postgresQuoter + q.IsReserved = db.needQuote + db.quoter = q + case QuotePolicyAlways: + fallthrough + default: + db.quoter = postgresQuoter + } +} + +func (db *postgres) SQLType(c *schemas.Column) string { + var res string + switch t := c.SQLType.Name; t { + case schemas.TinyInt: + res = schemas.SmallInt + return res + case schemas.Bit: + res = schemas.Boolean + return res + case schemas.MediumInt, schemas.Int, schemas.Integer: + if c.IsAutoIncrement { + return schemas.Serial + } + return schemas.Integer + case schemas.BigInt: + if c.IsAutoIncrement { + return schemas.BigSerial + } + return schemas.BigInt + case schemas.Serial, schemas.BigSerial: + c.IsAutoIncrement = true + c.Nullable = false + res = t + case schemas.Binary, schemas.VarBinary: + return schemas.Bytea + case schemas.DateTime: + res = schemas.TimeStamp + case schemas.TimeStampz: + return "timestamp with time zone" + case schemas.Float: + res = schemas.Real + case schemas.TinyText, schemas.MediumText, schemas.LongText: + res = schemas.Text + case schemas.NVarchar: + res = schemas.Varchar + case schemas.Uuid: + return schemas.Uuid + case schemas.Blob, schemas.TinyBlob, schemas.MediumBlob, schemas.LongBlob: + return schemas.Bytea + case schemas.Double: + return "DOUBLE PRECISION" + default: + if c.IsAutoIncrement { + return schemas.Serial + } + res = t + } + + if strings.EqualFold(res, "bool") { + // for bool, we don't need length information + return res + } + hasLen1 := (c.Length > 0) + hasLen2 := (c.Length2 > 0) + + if hasLen2 { + res += "(" + strconv.Itoa(c.Length) + "," + strconv.Itoa(c.Length2) + ")" + } else if hasLen1 { + res += "(" + strconv.Itoa(c.Length) + ")" + } + return res +} + +func (db *postgres) IsReserved(name string) bool { + _, ok := postgresReservedWords[strings.ToUpper(name)] 
+ return ok +} + +func (db *postgres) AutoIncrStr() string { + return "" +} + +func (db *postgres) CreateTableSQL(table *schemas.Table, tableName string) ([]string, bool) { + var sql string + sql = "CREATE TABLE IF NOT EXISTS " + if tableName == "" { + tableName = table.Name + } + + quoter := db.Quoter() + sql += quoter.Quote(tableName) + sql += " (" + + if len(table.ColumnsSeq()) > 0 { + pkList := table.PrimaryKeys + + for _, colName := range table.ColumnsSeq() { + col := table.GetColumn(colName) + s, _ := ColumnString(db, col, col.IsPrimaryKey && len(pkList) == 1) + sql += s + sql = strings.TrimSpace(sql) + sql += ", " + } + + if len(pkList) > 1 { + sql += "PRIMARY KEY ( " + sql += quoter.Join(pkList, ",") + sql += " ), " + } + + sql = sql[:len(sql)-2] + } + sql += ")" + + return []string{sql}, true +} + +func (db *postgres) IndexCheckSQL(tableName, idxName string) (string, []interface{}) { + if len(db.getSchema()) == 0 { + args := []interface{}{tableName, idxName} + return `SELECT indexname FROM pg_indexes WHERE tablename = ? AND indexname = ?`, args + } + + args := []interface{}{db.getSchema(), tableName, idxName} + return `SELECT indexname FROM pg_indexes ` + + `WHERE schemaname = ? AND tablename = ? AND indexname = ?`, args +} + +func (db *postgres) IsTableExist(queryer core.Queryer, ctx context.Context, tableName string) (bool, error) { + if len(db.getSchema()) == 0 { + return db.HasRecords(queryer, ctx, `SELECT tablename FROM pg_tables WHERE tablename = $1`, tableName) + } + + return db.HasRecords(queryer, ctx, `SELECT tablename FROM pg_tables WHERE schemaname = $1 AND tablename = $2`, + db.getSchema(), tableName) +} + +func (db *postgres) ModifyColumnSQL(tableName string, col *schemas.Column) string { + if len(db.getSchema()) == 0 || strings.Contains(tableName, ".") { + return fmt.Sprintf("alter table %s ALTER COLUMN %s TYPE %s", + tableName, col.Name, db.SQLType(col)) + } + return fmt.Sprintf("alter table %s.%s ALTER COLUMN %s TYPE %s", + db.getSchema(), tableName, col.Name, db.SQLType(col)) +} + +func (db *postgres) DropIndexSQL(tableName string, index *schemas.Index) string { + idxName := index.Name + + tableParts := strings.Split(strings.Replace(tableName, `"`, "", -1), ".") + tableName = tableParts[len(tableParts)-1] + + if !strings.HasPrefix(idxName, "UQE_") && + !strings.HasPrefix(idxName, "IDX_") { + if index.Type == schemas.UniqueType { + idxName = fmt.Sprintf("UQE_%v_%v", tableName, index.Name) + } else { + idxName = fmt.Sprintf("IDX_%v_%v", tableName, index.Name) + } + } + if db.getSchema() != "" { + idxName = db.getSchema() + "." + idxName + } + return fmt.Sprintf("DROP INDEX %v", db.Quoter().Quote(idxName)) +} + +func (db *postgres) IsColumnExist(queryer core.Queryer, ctx context.Context, tableName, colName string) (bool, error) { + args := []interface{}{db.getSchema(), tableName, colName} + query := "SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = $1 AND table_name = $2" + + " AND column_name = $3" + if len(db.getSchema()) == 0 { + args = []interface{}{tableName, colName} + query = "SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = $1" + + " AND column_name = $2" + } + + rows, err := queryer.QueryContext(ctx, query, args...) 
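+	// The lookup above is schema-qualified only when a schema is configured.
+	// Parameters use Postgres-style "$n" markers; the SeqFilter in Filters()
+	// below generates these from xorm's generic "?" placeholders.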
+ if err != nil { + return false, err + } + defer rows.Close() + + return rows.Next(), nil +} + +func (db *postgres) GetColumns(queryer core.Queryer, ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) { + args := []interface{}{tableName} + s := `SELECT column_name, column_default, is_nullable, data_type, character_maximum_length, + CASE WHEN p.contype = 'p' THEN true ELSE false END AS primarykey, + CASE WHEN p.contype = 'u' THEN true ELSE false END AS uniquekey +FROM pg_attribute f + JOIN pg_class c ON c.oid = f.attrelid JOIN pg_type t ON t.oid = f.atttypid + LEFT JOIN pg_attrdef d ON d.adrelid = c.oid AND d.adnum = f.attnum + LEFT JOIN pg_namespace n ON n.oid = c.relnamespace + LEFT JOIN pg_constraint p ON p.conrelid = c.oid AND f.attnum = ANY (p.conkey) + LEFT JOIN pg_class AS g ON p.confrelid = g.oid + LEFT JOIN INFORMATION_SCHEMA.COLUMNS s ON s.column_name=f.attname AND c.relname=s.table_name +WHERE n.nspname= s.table_schema AND c.relkind = 'r'::char AND c.relname = $1%s AND f.attnum > 0 ORDER BY f.attnum;` + + schema := db.getSchema() + if schema != "" { + s = fmt.Sprintf(s, "AND s.table_schema = $2") + args = append(args, schema) + } else { + s = fmt.Sprintf(s, "") + } + + rows, err := queryer.QueryContext(ctx, s, args...) + if err != nil { + return nil, nil, err + } + defer rows.Close() + + cols := make(map[string]*schemas.Column) + colSeq := make([]string, 0) + + for rows.Next() { + col := new(schemas.Column) + col.Indexes = make(map[string]int) + + var colName, isNullable, dataType string + var maxLenStr, colDefault *string + var isPK, isUnique bool + err = rows.Scan(&colName, &colDefault, &isNullable, &dataType, &maxLenStr, &isPK, &isUnique) + if err != nil { + return nil, nil, err + } + + var maxLen int + if maxLenStr != nil { + maxLen, err = strconv.Atoi(*maxLenStr) + if err != nil { + return nil, nil, err + } + } + + col.Name = strings.Trim(colName, `" `) + + if colDefault != nil { + var theDefault = *colDefault + // cockroach has type with the default value with ::: + // and postgres with ::, we should remove them before store them + idx := strings.Index(theDefault, ":::") + if idx == -1 { + idx = strings.Index(theDefault, "::") + } + if idx > -1 { + theDefault = theDefault[:idx] + } + + if strings.HasSuffix(theDefault, "+00:00'") { + theDefault = theDefault[:len(theDefault)-7] + "'" + } + + col.Default = theDefault + col.DefaultIsEmpty = false + if strings.HasPrefix(col.Default, "nextval(") { + col.IsAutoIncrement = true + col.Default = "" + col.DefaultIsEmpty = true + } + } else { + col.DefaultIsEmpty = true + } + + if isPK { + col.IsPrimaryKey = true + } + + col.Nullable = (isNullable == "YES") + + switch strings.ToLower(dataType) { + case "character varying", "character", "string": + col.SQLType = schemas.SQLType{Name: schemas.Varchar, DefaultLength: 0, DefaultLength2: 0} + case "timestamp without time zone": + col.SQLType = schemas.SQLType{Name: schemas.DateTime, DefaultLength: 0, DefaultLength2: 0} + case "timestamp with time zone": + col.SQLType = schemas.SQLType{Name: schemas.TimeStampz, DefaultLength: 0, DefaultLength2: 0} + case "double precision": + col.SQLType = schemas.SQLType{Name: schemas.Double, DefaultLength: 0, DefaultLength2: 0} + case "boolean": + col.SQLType = schemas.SQLType{Name: schemas.Bool, DefaultLength: 0, DefaultLength2: 0} + case "time without time zone": + col.SQLType = schemas.SQLType{Name: schemas.Time, DefaultLength: 0, DefaultLength2: 0} + case "bytes": + col.SQLType = schemas.SQLType{Name: schemas.Binary, 
DefaultLength: 0, DefaultLength2: 0} + case "oid": + col.SQLType = schemas.SQLType{Name: schemas.BigInt, DefaultLength: 0, DefaultLength2: 0} + case "array": + col.SQLType = schemas.SQLType{Name: schemas.Array, DefaultLength: 0, DefaultLength2: 0} + default: + startIdx := strings.Index(strings.ToLower(dataType), "string(") + if startIdx != -1 && strings.HasSuffix(dataType, ")") { + length := dataType[startIdx+8 : len(dataType)-1] + l, _ := strconv.Atoi(length) + col.SQLType = schemas.SQLType{Name: "STRING", DefaultLength: l, DefaultLength2: 0} + } else { + col.SQLType = schemas.SQLType{Name: strings.ToUpper(dataType), DefaultLength: 0, DefaultLength2: 0} + } + } + if _, ok := schemas.SqlTypes[col.SQLType.Name]; !ok { + return nil, nil, fmt.Errorf("Unknown colType: %s - %s", dataType, col.SQLType.Name) + } + + col.Length = maxLen + + if !col.DefaultIsEmpty { + if col.SQLType.IsText() { + if strings.HasSuffix(col.Default, "::character varying") { + col.Default = strings.TrimRight(col.Default, "::character varying") + } else if !strings.HasPrefix(col.Default, "'") { + col.Default = "'" + col.Default + "'" + } + } else if col.SQLType.IsTime() { + if strings.HasSuffix(col.Default, "::timestamp without time zone") { + col.Default = strings.TrimRight(col.Default, "::timestamp without time zone") + } + } + } + cols[col.Name] = col + colSeq = append(colSeq, col.Name) + } + + return colSeq, cols, nil +} + +func (db *postgres) GetTables(queryer core.Queryer, ctx context.Context) ([]*schemas.Table, error) { + args := []interface{}{} + s := "SELECT tablename FROM pg_tables" + schema := db.getSchema() + if schema != "" { + args = append(args, schema) + s = s + " WHERE schemaname = $1" + } + + rows, err := queryer.QueryContext(ctx, s, args...) + if err != nil { + return nil, err + } + defer rows.Close() + + tables := make([]*schemas.Table, 0) + for rows.Next() { + table := schemas.NewEmptyTable() + var name string + err = rows.Scan(&name) + if err != nil { + return nil, err + } + table.Name = name + tables = append(tables, table) + } + return tables, nil +} + +func getIndexColName(indexdef string) []string { + var colNames []string + + cs := strings.Split(indexdef, "(") + for _, v := range strings.Split(strings.Split(cs[1], ")")[0], ",") { + colNames = append(colNames, strings.Split(strings.TrimLeft(v, " "), " ")[0]) + } + + return colNames +} + +func (db *postgres) GetIndexes(queryer core.Queryer, ctx context.Context, tableName string) (map[string]*schemas.Index, error) { + args := []interface{}{tableName} + s := fmt.Sprintf("SELECT indexname, indexdef FROM pg_indexes WHERE tablename=$1") + if len(db.getSchema()) != 0 { + args = append(args, db.getSchema()) + s = s + " AND schemaname=$2" + } + + rows, err := queryer.QueryContext(ctx, s, args...) 
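+	// Indexes created by xorm are named "IDX_<table>_<name>" (or "UQE_" for
+	// unique ones); the loop below strips that prefix (4 characters, the
+	// table name, then "_", hence the 5+len(tableName) slice) to recover the
+	// bare index name.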
+ if err != nil { + return nil, err + } + defer rows.Close() + + indexes := make(map[string]*schemas.Index, 0) + for rows.Next() { + var indexType int + var indexName, indexdef string + var colNames []string + err = rows.Scan(&indexName, &indexdef) + if err != nil { + return nil, err + } + + if indexName == "primary" { + continue + } + indexName = strings.Trim(indexName, `" `) + if strings.HasSuffix(indexName, "_pkey") { + continue + } + if strings.HasPrefix(indexdef, "CREATE UNIQUE INDEX") { + indexType = schemas.UniqueType + } else { + indexType = schemas.IndexType + } + colNames = getIndexColName(indexdef) + var isRegular bool + if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) { + newIdxName := indexName[5+len(tableName):] + isRegular = true + if newIdxName != "" { + indexName = newIdxName + } + } + + index := &schemas.Index{Name: indexName, Type: indexType, Cols: make([]string, 0)} + for _, colName := range colNames { + index.Cols = append(index.Cols, strings.TrimSpace(strings.Replace(colName, `"`, "", -1))) + } + index.IsRegular = isRegular + indexes[index.Name] = index + } + return indexes, nil +} + +func (db *postgres) Filters() []Filter { + return []Filter{&SeqFilter{Prefix: "$", Start: 1}} +} + +type pqDriver struct { +} + +type values map[string]string + +func (vs values) Set(k, v string) { + vs[k] = v +} + +func (vs values) Get(k string) (v string) { + return vs[k] +} + +func parseURL(connstr string) (string, error) { + u, err := url.Parse(connstr) + if err != nil { + return "", err + } + + if u.Scheme != "postgresql" && u.Scheme != "postgres" { + return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme) + } + + escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`) + + if u.Path != "" { + return escaper.Replace(u.Path[1:]), nil + } + + return "", nil +} + +func parseOpts(name string, o values) error { + if len(name) == 0 { + return fmt.Errorf("invalid options: %s", name) + } + + name = strings.TrimSpace(name) + + ps := strings.Split(name, " ") + for _, p := range ps { + kv := strings.Split(p, "=") + if len(kv) < 2 { + return fmt.Errorf("invalid option: %q", p) + } + o.Set(kv[0], kv[1]) + } + + return nil +} + +func (p *pqDriver) Parse(driverName, dataSourceName string) (*URI, error) { + db := &URI{DBType: schemas.POSTGRES} + var err error + + if strings.HasPrefix(dataSourceName, "postgresql://") || strings.HasPrefix(dataSourceName, "postgres://") { + db.DBName, err = parseURL(dataSourceName) + if err != nil { + return nil, err + } + } else { + o := make(values) + err = parseOpts(dataSourceName, o) + if err != nil { + return nil, err + } + + db.DBName = o.Get("dbname") + } + + if db.DBName == "" { + return nil, errors.New("dbname is empty") + } + + return db, nil +} + +type pqDriverPgx struct { + pqDriver +} + +func (pgx *pqDriverPgx) Parse(driverName, dataSourceName string) (*URI, error) { + // Remove the leading characters for driver to work + if len(dataSourceName) >= 9 && dataSourceName[0] == 0 { + dataSourceName = dataSourceName[9:] + } + return pgx.pqDriver.Parse(driverName, dataSourceName) +} + +// QueryDefaultPostgresSchema returns the default postgres schema +func QueryDefaultPostgresSchema(ctx context.Context, queryer core.Queryer) (string, error) { + rows, err := queryer.QueryContext(ctx, "SHOW SEARCH_PATH") + if err != nil { + return "", err + } + defer rows.Close() + if rows.Next() { + var defaultSchema string + if err = rows.Scan(&defaultSchema); err != nil { + return "", err + } + parts := 
strings.Split(defaultSchema, ",")
+		return strings.TrimSpace(parts[len(parts)-1]), nil
+	}
+
+	return "", errors.New("No default schema")
+}
diff --git a/vendor/xorm.io/xorm/dialects/quote.go b/vendor/xorm.io/xorm/dialects/quote.go
new file mode 100644
index 0000000..da4e0dd
--- /dev/null
+++ b/vendor/xorm.io/xorm/dialects/quote.go
@@ -0,0 +1,15 @@
+// Copyright 2020 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dialects
+
+// QuotePolicy describes the quote handling policy
+type QuotePolicy int
+
+// All QuotePolicies
+const (
+	QuotePolicyAlways QuotePolicy = iota
+	QuotePolicyNone
+	QuotePolicyReserved
+)
diff --git a/vendor/xorm.io/xorm/dialects/sqlite3.go b/vendor/xorm.io/xorm/dialects/sqlite3.go
new file mode 100644
index 0000000..73f98be
--- /dev/null
+++ b/vendor/xorm.io/xorm/dialects/sqlite3.go
@@ -0,0 +1,529 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dialects
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+
+	"xorm.io/xorm/core"
+	"xorm.io/xorm/schemas"
+)
+
+var (
+	sqlite3ReservedWords = map[string]bool{
+		"ABORT": true,
+		"ACTION": true,
+		"ADD": true,
+		"AFTER": true,
+		"ALL": true,
+		"ALTER": true,
+		"ANALYZE": true,
+		"AND": true,
+		"AS": true,
+		"ASC": true,
+		"ATTACH": true,
+		"AUTOINCREMENT": true,
+		"BEFORE": true,
+		"BEGIN": true,
+		"BETWEEN": true,
+		"BY": true,
+		"CASCADE": true,
+		"CASE": true,
+		"CAST": true,
+		"CHECK": true,
+		"COLLATE": true,
+		"COLUMN": true,
+		"COMMIT": true,
+		"CONFLICT": true,
+		"CONSTRAINT": true,
+		"CREATE": true,
+		"CROSS": true,
+		"CURRENT_DATE": true,
+		"CURRENT_TIME": true,
+		"CURRENT_TIMESTAMP": true,
+		"DATABASE": true,
+		"DEFAULT": true,
+		"DEFERRABLE": true,
+		"DEFERRED": true,
+		"DELETE": true,
+		"DESC": true,
+		"DETACH": true,
+		"DISTINCT": true,
+		"DROP": true,
+		"EACH": true,
+		"ELSE": true,
+		"END": true,
+		"ESCAPE": true,
+		"EXCEPT": true,
+		"EXCLUSIVE": true,
+		"EXISTS": true,
+		"EXPLAIN": true,
+		"FAIL": true,
+		"FOR": true,
+		"FOREIGN": true,
+		"FROM": true,
+		"FULL": true,
+		"GLOB": true,
+		"GROUP": true,
+		"HAVING": true,
+		"IF": true,
+		"IGNORE": true,
+		"IMMEDIATE": true,
+		"IN": true,
+		"INDEX": true,
+		"INDEXED": true,
+		"INITIALLY": true,
+		"INNER": true,
+		"INSERT": true,
+		"INSTEAD": true,
+		"INTERSECT": true,
+		"INTO": true,
+		"IS": true,
+		"ISNULL": true,
+		"JOIN": true,
+		"KEY": true,
+		"LEFT": true,
+		"LIKE": true,
+		"LIMIT": true,
+		"MATCH": true,
+		"NATURAL": true,
+		"NO": true,
+		"NOT": true,
+		"NOTNULL": true,
+		"NULL": true,
+		"OF": true,
+		"OFFSET": true,
+		"ON": true,
+		"OR": true,
+		"ORDER": true,
+		"OUTER": true,
+		"PLAN": true,
+		"PRAGMA": true,
+		"PRIMARY": true,
+		"QUERY": true,
+		"RAISE": true,
+		"RECURSIVE": true,
+		"REFERENCES": true,
+		"REGEXP": true,
+		"REINDEX": true,
+		"RELEASE": true,
+		"RENAME": true,
+		"REPLACE": true,
+		"RESTRICT": true,
+		"RIGHT": true,
+		"ROLLBACK": true,
+		"ROW": true,
+		"SAVEPOINT": true,
+		"SELECT": true,
+		"SET": true,
+		"TABLE": true,
+		"TEMP": true,
+		"TEMPORARY": true,
+		"THEN": true,
+		"TO": true,
+		"TRANSACTION": true,
+		"TRIGGER": true,
+		"UNION": true,
+		"UNIQUE": true,
+		"UPDATE": true,
+		"USING": true,
+		"VACUUM": true,
+		"VALUES": true,
+		"VIEW": true,
+		"VIRTUAL": true,
+		"WHEN": true,
+		"WHERE": true,
+		"WITH": true,
+		"WITHOUT": true,
+	}
+
+	sqlite3Quoter = schemas.Quoter{
+		Prefix: 
'`', + Suffix: '`', + IsReserved: schemas.AlwaysReserve, + } +) + +type sqlite3 struct { + Base +} + +func (db *sqlite3) Init(uri *URI) error { + db.quoter = sqlite3Quoter + return db.Base.Init(db, uri) +} + +func (db *sqlite3) SetQuotePolicy(quotePolicy QuotePolicy) { + switch quotePolicy { + case QuotePolicyNone: + var q = sqlite3Quoter + q.IsReserved = schemas.AlwaysNoReserve + db.quoter = q + case QuotePolicyReserved: + var q = sqlite3Quoter + q.IsReserved = db.IsReserved + db.quoter = q + case QuotePolicyAlways: + fallthrough + default: + db.quoter = sqlite3Quoter + } +} + +func (db *sqlite3) SQLType(c *schemas.Column) string { + switch t := c.SQLType.Name; t { + case schemas.Bool: + if c.Default == "true" { + c.Default = "1" + } else if c.Default == "false" { + c.Default = "0" + } + return schemas.Integer + case schemas.Date, schemas.DateTime, schemas.TimeStamp, schemas.Time: + return schemas.DateTime + case schemas.TimeStampz: + return schemas.Text + case schemas.Char, schemas.Varchar, schemas.NVarchar, schemas.TinyText, + schemas.Text, schemas.MediumText, schemas.LongText, schemas.Json: + return schemas.Text + case schemas.Bit, schemas.TinyInt, schemas.SmallInt, schemas.MediumInt, schemas.Int, schemas.Integer, schemas.BigInt: + return schemas.Integer + case schemas.Float, schemas.Double, schemas.Real: + return schemas.Real + case schemas.Decimal, schemas.Numeric: + return schemas.Numeric + case schemas.TinyBlob, schemas.Blob, schemas.MediumBlob, schemas.LongBlob, schemas.Bytea, schemas.Binary, schemas.VarBinary: + return schemas.Blob + case schemas.Serial, schemas.BigSerial: + c.IsPrimaryKey = true + c.IsAutoIncrement = true + c.Nullable = false + return schemas.Integer + default: + return t + } +} + +func (db *sqlite3) FormatBytes(bs []byte) string { + return fmt.Sprintf("X'%x'", bs) +} + +func (db *sqlite3) IsReserved(name string) bool { + _, ok := sqlite3ReservedWords[strings.ToUpper(name)] + return ok +} + +func (db *sqlite3) AutoIncrStr() string { + return "AUTOINCREMENT" +} + +func (db *sqlite3) IndexCheckSQL(tableName, idxName string) (string, []interface{}) { + args := []interface{}{idxName} + return "SELECT name FROM sqlite_master WHERE type='index' and name = ?", args +} + +func (db *sqlite3) IsTableExist(queryer core.Queryer, ctx context.Context, tableName string) (bool, error) { + return db.HasRecords(queryer, ctx, "SELECT name FROM sqlite_master WHERE type='table' and name = ?", tableName) +} + +func (db *sqlite3) DropIndexSQL(tableName string, index *schemas.Index) string { + // var unique string + idxName := index.Name + + if !strings.HasPrefix(idxName, "UQE_") && + !strings.HasPrefix(idxName, "IDX_") { + if index.Type == schemas.UniqueType { + idxName = fmt.Sprintf("UQE_%v_%v", tableName, index.Name) + } else { + idxName = fmt.Sprintf("IDX_%v_%v", tableName, index.Name) + } + } + return fmt.Sprintf("DROP INDEX %v", db.Quoter().Quote(idxName)) +} + +func (db *sqlite3) CreateTableSQL(table *schemas.Table, tableName string) ([]string, bool) { + var sql string + sql = "CREATE TABLE IF NOT EXISTS " + if tableName == "" { + tableName = table.Name + } + + quoter := db.Quoter() + sql += quoter.Quote(tableName) + sql += " (" + + if len(table.ColumnsSeq()) > 0 { + pkList := table.PrimaryKeys + + for _, colName := range table.ColumnsSeq() { + col := table.GetColumn(colName) + s, _ := ColumnString(db, col, col.IsPrimaryKey && len(pkList) == 1) + sql += s + sql = strings.TrimSpace(sql) + sql += ", " + } + + if len(pkList) > 1 { + sql += "PRIMARY KEY ( " + sql += 
quoter.Join(pkList, ",") + sql += " ), " + } + + sql = sql[:len(sql)-2] + } + sql += ")" + + return []string{sql}, true +} + +func (db *sqlite3) ForUpdateSQL(query string) string { + return query +} + +func (db *sqlite3) IsColumnExist(queryer core.Queryer, ctx context.Context, tableName, colName string) (bool, error) { + query := "SELECT * FROM " + tableName + " LIMIT 0" + rows, err := queryer.QueryContext(ctx, query) + if err != nil { + return false, err + } + defer rows.Close() + + cols, err := rows.Columns() + if err != nil { + return false, err + } + + for _, col := range cols { + if strings.EqualFold(col, colName) { + return true, nil + } + } + + return false, nil +} + +// splitColStr splits a sqlite col strings as fields +func splitColStr(colStr string) []string { + colStr = strings.TrimSpace(colStr) + var results = make([]string, 0, 10) + var lastIdx int + var hasC, hasQuote bool + for i, c := range colStr { + if c == ' ' && !hasQuote { + if hasC { + results = append(results, colStr[lastIdx:i]) + hasC = false + } + } else { + if c == '\'' { + hasQuote = !hasQuote + } + if !hasC { + lastIdx = i + } + hasC = true + if i == len(colStr)-1 { + results = append(results, colStr[lastIdx:i+1]) + } + } + } + return results +} + +func parseString(colStr string) (*schemas.Column, error) { + fields := splitColStr(colStr) + col := new(schemas.Column) + col.Indexes = make(map[string]int) + col.Nullable = true + col.DefaultIsEmpty = true + + for idx, field := range fields { + if idx == 0 { + col.Name = strings.Trim(strings.Trim(field, "`[] "), `"`) + continue + } else if idx == 1 { + col.SQLType = schemas.SQLType{Name: field, DefaultLength: 0, DefaultLength2: 0} + continue + } + switch field { + case "PRIMARY": + col.IsPrimaryKey = true + case "AUTOINCREMENT": + col.IsAutoIncrement = true + case "NULL": + if fields[idx-1] == "NOT" { + col.Nullable = false + } else { + col.Nullable = true + } + case "DEFAULT": + col.Default = fields[idx+1] + col.DefaultIsEmpty = false + } + } + return col, nil +} + +func (db *sqlite3) GetColumns(queryer core.Queryer, ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) { + args := []interface{}{tableName} + s := "SELECT sql FROM sqlite_master WHERE type='table' and name = ?" + + rows, err := queryer.QueryContext(ctx, s, args...) 
+ if err != nil { + return nil, nil, err + } + defer rows.Close() + + var name string + for rows.Next() { + err = rows.Scan(&name) + if err != nil { + return nil, nil, err + } + break + } + + if name == "" { + return nil, nil, errors.New("no table named " + tableName) + } + + nStart := strings.Index(name, "(") + nEnd := strings.LastIndex(name, ")") + reg := regexp.MustCompile(`[^\(,\)]*(\([^\(]*\))?`) + colCreates := reg.FindAllString(name[nStart+1:nEnd], -1) + cols := make(map[string]*schemas.Column) + colSeq := make([]string, 0) + + for _, colStr := range colCreates { + reg = regexp.MustCompile(`,\s`) + colStr = reg.ReplaceAllString(colStr, ",") + if strings.HasPrefix(strings.TrimSpace(colStr), "PRIMARY KEY") { + parts := strings.Split(strings.TrimSpace(colStr), "(") + if len(parts) == 2 { + pkCols := strings.Split(strings.TrimRight(strings.TrimSpace(parts[1]), ")"), ",") + for _, pk := range pkCols { + if col, ok := cols[strings.Trim(strings.TrimSpace(pk), "`")]; ok { + col.IsPrimaryKey = true + } + } + } + continue + } + + col, err := parseString(colStr) + if err != nil { + return colSeq, cols, err + } + + cols[col.Name] = col + colSeq = append(colSeq, col.Name) + } + return colSeq, cols, nil +} + +func (db *sqlite3) GetTables(queryer core.Queryer, ctx context.Context) ([]*schemas.Table, error) { + args := []interface{}{} + s := "SELECT name FROM sqlite_master WHERE type='table'" + + rows, err := queryer.QueryContext(ctx, s, args...) + if err != nil { + return nil, err + } + defer rows.Close() + + tables := make([]*schemas.Table, 0) + for rows.Next() { + table := schemas.NewEmptyTable() + err = rows.Scan(&table.Name) + if err != nil { + return nil, err + } + if table.Name == "sqlite_sequence" { + continue + } + tables = append(tables, table) + } + return tables, nil +} + +func (db *sqlite3) GetIndexes(queryer core.Queryer, ctx context.Context, tableName string) (map[string]*schemas.Index, error) { + args := []interface{}{tableName} + s := "SELECT sql FROM sqlite_master WHERE type='index' and tbl_name = ?" + + rows, err := queryer.QueryContext(ctx, s, args...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + + indexes := make(map[string]*schemas.Index, 0) + for rows.Next() { + var tmpSQL sql.NullString + err = rows.Scan(&tmpSQL) + if err != nil { + return nil, err + } + + if !tmpSQL.Valid { + continue + } + sql := tmpSQL.String + + index := new(schemas.Index) + nNStart := strings.Index(sql, "INDEX") + nNEnd := strings.Index(sql, "ON") + if nNStart == -1 || nNEnd == -1 { + continue + } + + indexName := strings.Trim(sql[nNStart+6:nNEnd], "` []'\"") + var isRegular bool + if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) { + index.Name = indexName[5+len(tableName):] + isRegular = true + } else { + index.Name = indexName + } + + if strings.HasPrefix(sql, "CREATE UNIQUE INDEX") { + index.Type = schemas.UniqueType + } else { + index.Type = schemas.IndexType + } + + nStart := strings.Index(sql, "(") + nEnd := strings.Index(sql, ")") + colIndexes := strings.Split(sql[nStart+1:nEnd], ",") + + index.Cols = make([]string, 0) + for _, col := range colIndexes { + index.Cols = append(index.Cols, strings.Trim(col, "` []")) + } + index.IsRegular = isRegular + indexes[index.Name] = index + } + + return indexes, nil +} + +func (db *sqlite3) Filters() []Filter { + return []Filter{} +} + +type sqlite3Driver struct { +} + +func (p *sqlite3Driver) Parse(driverName, dataSourceName string) (*URI, error) { + if strings.Contains(dataSourceName, "?") { + dataSourceName = dataSourceName[:strings.Index(dataSourceName, "?")] + } + + return &URI{DBType: schemas.SQLITE, DBName: dataSourceName}, nil +} diff --git a/vendor/xorm.io/xorm/dialects/table_name.go b/vendor/xorm.io/xorm/dialects/table_name.go new file mode 100644 index 0000000..e190cd4 --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/table_name.go @@ -0,0 +1,89 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dialects + +import ( + "fmt" + "reflect" + "strings" + + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/names" +) + +// TableNameWithSchema will add schema prefix on table name if possible +func TableNameWithSchema(dialect Dialect, tableName string) string { + // Add schema name as prefix of table name. + // Only for postgres database. 
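+	// e.g. with URI schema "public", table name "user" becomes "public.user".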
+ if dialect.URI().Schema != "" && + strings.Index(tableName, ".") == -1 { + return fmt.Sprintf("%s.%s", dialect.URI().Schema, tableName) + } + return tableName +} + +// TableNameNoSchema returns table name with given tableName +func TableNameNoSchema(dialect Dialect, mapper names.Mapper, tableName interface{}) string { + quote := dialect.Quoter().Quote + switch tableName.(type) { + case []string: + t := tableName.([]string) + if len(t) > 1 { + return fmt.Sprintf("%v AS %v", quote(t[0]), quote(t[1])) + } else if len(t) == 1 { + return quote(t[0]) + } + case []interface{}: + t := tableName.([]interface{}) + l := len(t) + var table string + if l > 0 { + f := t[0] + switch f.(type) { + case string: + table = f.(string) + case names.TableName: + table = f.(names.TableName).TableName() + default: + v := utils.ReflectValue(f) + t := v.Type() + if t.Kind() == reflect.Struct { + table = names.GetTableName(mapper, v) + } else { + table = quote(fmt.Sprintf("%v", f)) + } + } + } + if l > 1 { + return fmt.Sprintf("%v AS %v", quote(table), quote(fmt.Sprintf("%v", t[1]))) + } else if l == 1 { + return quote(table) + } + case names.TableName: + return tableName.(names.TableName).TableName() + case string: + return tableName.(string) + case reflect.Value: + v := tableName.(reflect.Value) + return names.GetTableName(mapper, v) + default: + v := utils.ReflectValue(tableName) + t := v.Type() + if t.Kind() == reflect.Struct { + return names.GetTableName(mapper, v) + } + return quote(fmt.Sprintf("%v", tableName)) + } + return "" +} + +// FullTableName returns table name with quote and schema according parameter +func FullTableName(dialect Dialect, mapper names.Mapper, bean interface{}, includeSchema ...bool) string { + tbName := TableNameNoSchema(dialect, mapper, bean) + if len(includeSchema) > 0 && includeSchema[0] && !utils.IsSubQuery(tbName) { + tbName = TableNameWithSchema(dialect, tbName) + } + return tbName +} diff --git a/vendor/xorm.io/xorm/dialects/time.go b/vendor/xorm.io/xorm/dialects/time.go new file mode 100644 index 0000000..b039474 --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/time.go @@ -0,0 +1,49 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dialects + +import ( + "time" + + "xorm.io/xorm/schemas" +) + +// FormatTime format time as column type +func FormatTime(dialect Dialect, sqlTypeName string, t time.Time) (v interface{}) { + switch sqlTypeName { + case schemas.Time: + s := t.Format("2006-01-02 15:04:05") // time.RFC3339 + v = s[11:19] + case schemas.Date: + v = t.Format("2006-01-02") + case schemas.DateTime, schemas.TimeStamp, schemas.Varchar: // !DarthPestilane! format time when sqlTypeName is schemas.Varchar. 
+		v = t.Format("2006-01-02 15:04:05")
+	case schemas.TimeStampz:
+		if dialect.URI().DBType == schemas.MSSQL {
+			v = t.Format("2006-01-02T15:04:05.9999999Z07:00")
+		} else {
+			v = t.Format(time.RFC3339Nano)
+		}
+	case schemas.BigInt, schemas.Int:
+		v = t.Unix()
+	default:
+		v = t
+	}
+	return
+}
+
+// FormatColumnTime formats a column's time value according to the column's
+// SQL type and time zone settings.
+func FormatColumnTime(dialect Dialect, defaultTimeZone *time.Location, col *schemas.Column, t time.Time) (v interface{}) {
+	if t.IsZero() {
+		if col.Nullable {
+			return nil
+		}
+		return ""
+	}
+
+	if col.TimeZone != nil {
+		return FormatTime(dialect, col.SQLType.Name, t.In(col.TimeZone))
+	}
+	return FormatTime(dialect, col.SQLType.Name, t.In(defaultTimeZone))
+}
diff --git a/vendor/xorm.io/xorm/doc.go b/vendor/xorm.io/xorm/doc.go
new file mode 100644
index 0000000..ea6a222
--- /dev/null
+++ b/vendor/xorm.io/xorm/doc.go
@@ -0,0 +1,184 @@
+// Copyright 2013 - 2016 The XORM Authors. All rights reserved.
+// Use of this source code is governed by a BSD
+// license that can be found in the LICENSE file.
+
+/*
+
+Package xorm is a simple and powerful ORM for Go.
+
+Installation
+
+Make sure you have installed Go 1.11+ and then:
+
+    go get xorm.io/xorm
+
+Create Engine
+
+First, create an engine for a database:
+
+    engine, err := xorm.NewEngine(driverName, dataSourceName)
+
+NewEngine's parameters are the same as sql.Open's; their exact meaning depends
+on the driver's implementation.
+Generally, one engine for an application is enough, and you can set it as a package variable.
+
+Raw Methods
+
+XORM also supports raw SQL execution:
+
+1. Query a SQL string; the returned results are []map[string][]byte
+
+    results, err := engine.Query("select * from user")
+
+2. Execute a SQL string; the returned result reports the rows affected
+
+    affected, err := engine.Exec("update user set .... where ...")
+
+ORM Methods
+
+There are 8 major ORM methods and many helpful methods for operating the database.
+
+1. Insert one or multiple records into the database
+
+    affected, err := engine.Insert(&struct)
+    // INSERT INTO struct () values ()
+    affected, err := engine.Insert(&struct1, &struct2)
+    // INSERT INTO struct1 () values ()
+    // INSERT INTO struct2 () values ()
+    affected, err := engine.Insert(&sliceOfStruct)
+    // INSERT INTO struct () values (),(),()
+    affected, err := engine.Insert(&struct1, &sliceOfStruct2)
+    // INSERT INTO struct1 () values ()
+    // INSERT INTO struct2 () values (),(),()
+
+2. Query one record or one variable from the database
+
+    has, err := engine.Get(&user)
+    // SELECT * FROM user LIMIT 1
+
+    var id int64
+    has, err := engine.Table("user").Where("name = ?", name).Get(&id)
+    // SELECT id FROM user WHERE name = ? LIMIT 1
+
+3. Query multiple records from the database
+
+    var sliceOfStructs []Struct
+    err := engine.Find(&sliceOfStructs)
+    // SELECT * FROM user
+
+    var mapOfStructs = make(map[int64]Struct)
+    err := engine.Find(&mapOfStructs)
+    // SELECT * FROM user
+
+    var int64s []int64
+    err := engine.Table("user").Cols("id").Find(&int64s)
+    // SELECT id FROM user
+
+4. Query multiple records and handle them record by record; there are two methods,
+one is Iterate and the other is Rows
+
+    err := engine.Iterate(...)
+    // SELECT * FROM user
+
+    rows, err := engine.Rows(...)
+    // SELECT * FROM user
+    defer rows.Close()
+    bean := new(Struct)
+    for rows.Next() {
+        err = rows.Scan(bean)
+    }
+
+5. Update one or more records
+
+    affected, err := engine.ID(...).Update(&user)
+    // UPDATE user SET ...
+
+6. Delete one or more records; Delete MUST have a condition
+
+    affected, err := engine.Where(...).Delete(&user)
+    // DELETE FROM user Where ...
+
+7. Count records
+
+    counts, err := engine.Count(&user)
+    // SELECT count(*) AS total FROM user
+
+    counts, err := engine.SQL("select count(*) FROM user").Count()
+    // select count(*) FROM user
+
+8. Sum records
+
+    sumFloat64, err := engine.Sum(&user, "id")
+    // SELECT sum(id) from user
+
+    sumFloat64s, err := engine.Sums(&user, "id1", "id2")
+    // SELECT sum(id1), sum(id2) from user
+
+    sumInt64s, err := engine.SumsInt(&user, "id1", "id2")
+    // SELECT sum(id1), sum(id2) from user
+
+Conditions
+
+The above 8 methods can be combined with chainable condition methods.
+Attention: the above 8 methods should be the last method in the chain.
+
+1. ID, In
+
+    engine.ID(1).Get(&user) // for single primary key
+    // SELECT * FROM user WHERE id = 1
+    engine.ID(schemas.PK{1, 2}).Get(&user) // for composite primary keys
+    // SELECT * FROM user WHERE id1 = 1 AND id2 = 2
+    engine.In("id", 1, 2, 3).Find(&users)
+    // SELECT * FROM user WHERE id IN (1, 2, 3)
+    engine.In("id", []int{1, 2, 3}).Find(&users)
+    // SELECT * FROM user WHERE id IN (1, 2, 3)
+
+2. Where, And, Or
+
+    engine.Where().And().Or().Find()
+    // SELECT * FROM user WHERE (.. AND ..) OR ...
+
+3. OrderBy, Asc, Desc
+
+    engine.Asc().Desc().Find()
+    // SELECT * FROM user ORDER BY .. ASC, .. DESC
+    engine.OrderBy().Find()
+    // SELECT * FROM user ORDER BY ..
+
+4. Limit, Top
+
+    engine.Limit().Find()
+    // SELECT * FROM user LIMIT .. OFFSET ..
+    engine.Top(5).Find()
+    // SELECT TOP 5 * FROM user // for mssql
+    // SELECT * FROM user LIMIT .. OFFSET 0 // for other databases
+
+5. SQL, lets you use custom SQL
+
+    var users []User
+    engine.SQL("select * from user").Find(&users)
+
+6. Cols, Omit, Distinct
+
+    var users []*User
+    engine.Cols("col1, col2").Find(&users)
+    // SELECT col1, col2 FROM user
+    engine.Cols("col1", "col2").Where().Update(user)
+    // UPDATE user set col1 = ?, col2 = ? Where ...
+    engine.Omit("col1").Find(&users)
+    // SELECT col2, col3 FROM user
+    engine.Omit("col1").Insert(&user)
+    // INSERT INTO table (non-col1) VALUES ()
+    engine.Distinct("col1").Find(&users)
+    // SELECT DISTINCT col1 FROM user
+
+7. Join, GroupBy, Having
+
+    engine.GroupBy("name").Having("name='xlw'").Find(&users)
+    // SELECT * FROM user GROUP BY name HAVING name='xlw'
+    engine.Join("LEFT", "userdetail", "user.id=userdetail.id").Find(&users)
+    // SELECT * FROM user LEFT JOIN userdetail ON user.id=userdetail.id
+
+For more usage, please visit http://xorm.io/docs
+*/
+package xorm
diff --git a/vendor/xorm.io/xorm/engine.go b/vendor/xorm.io/xorm/engine.go
new file mode 100644
index 0000000..4159a7b
--- /dev/null
+++ b/vendor/xorm.io/xorm/engine.go
@@ -0,0 +1,1303 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+
+	"xorm.io/xorm/caches"
+	"xorm.io/xorm/contexts"
+	"xorm.io/xorm/core"
+	"xorm.io/xorm/dialects"
+	"xorm.io/xorm/internal/utils"
+	"xorm.io/xorm/log"
+	"xorm.io/xorm/names"
+	"xorm.io/xorm/schemas"
+	"xorm.io/xorm/tags"
+)
+
+// Engine is the major struct of xorm; it manages one database.
+// Commonly, an application only needs one engine.
+type Engine struct {
+	cacherMgr      *caches.Manager
+	defaultContext context.Context
+	dialect        dialects.Dialect
+	engineGroup    *EngineGroup
+	logger         log.ContextLogger
+	tagParser      *tags.Parser
+	db             *core.DB
+
+	driverName     string
+	dataSourceName string
+
+	TZLocation *time.Location // The timezone of the application
+	DatabaseTZ *time.Location // The timezone of the database
+
+	logSessionID bool // create session id
+}
+
+// NewEngine creates a db manager according to the parameters. Four drivers
+// are currently supported.
+func NewEngine(driverName string, dataSourceName string) (*Engine, error) {
+	dialect, err := dialects.OpenDialect(driverName, dataSourceName)
+	if err != nil {
+		return nil, err
+	}
+
+	db, err := core.Open(driverName, dataSourceName)
+	if err != nil {
+		return nil, err
+	}
+
+	cacherMgr := caches.NewManager()
+	mapper := names.NewCacheMapper(new(names.SnakeMapper))
+	tagParser := tags.NewParser("xorm", dialect, mapper, mapper, cacherMgr)
+
+	engine := &Engine{
+		dialect:        dialect,
+		TZLocation:     time.Local,
+		defaultContext: context.Background(),
+		cacherMgr:      cacherMgr,
+		tagParser:      tagParser,
+		driverName:     driverName,
+		dataSourceName: dataSourceName,
+		db:             db,
+		logSessionID:   false,
+	}
+
+	if dialect.URI().DBType == schemas.SQLITE {
+		engine.DatabaseTZ = time.UTC
+	} else {
+		engine.DatabaseTZ = time.Local
+	}
+
+	logger := log.NewSimpleLogger(os.Stdout)
+	logger.SetLevel(log.LOG_INFO)
+	engine.SetLogger(log.NewLoggerAdapter(logger))
+
+	runtime.SetFinalizer(engine, func(engine *Engine) {
+		engine.Close()
+	})
+
+	return engine, nil
+}
+
+// NewEngineWithParams creates a db manager with params. The params will be passed to dialects.
+func NewEngineWithParams(driverName string, dataSourceName string, params map[string]string) (*Engine, error) {
+	engine, err := NewEngine(driverName, dataSourceName)
+	if err != nil {
+		return nil, err
+	}
+	engine.dialect.SetParams(params)
+	return engine, nil
+}
+
+// EnableSessionID enables or disables logging a session id
+func (engine *Engine) EnableSessionID(enable bool) {
+	engine.logSessionID = enable
+}
+
+// SetCacher sets cacher for the table
+func (engine *Engine) SetCacher(tableName string, cacher caches.Cacher) {
+	engine.cacherMgr.SetCacher(tableName, cacher)
+}
+
+// GetCacher returns the cacher of the specified table
+func (engine *Engine) GetCacher(tableName string) caches.Cacher {
+	return engine.cacherMgr.GetCacher(tableName)
+}
+
+// SetQuotePolicy sets the specified quote policy
+func (engine *Engine) SetQuotePolicy(quotePolicy dialects.QuotePolicy) {
+	engine.dialect.SetQuotePolicy(quotePolicy)
+}
+
+// BufferSize sets the buffer size for Iterate
+func (engine *Engine) BufferSize(size int) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.BufferSize(size)
+}
+
+// ShowSQL shows SQL statements on the logger or not if the log level is greater than INFO
+func (engine *Engine) ShowSQL(show ...bool) {
+	engine.logger.ShowSQL(show...)
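+	// propagate the logger to the wrapped core.DB so SQL logging applies there as well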
+	engine.DB().Logger = engine.logger
+}
+
+// Logger returns the logger interface
+func (engine *Engine) Logger() log.ContextLogger {
+	return engine.logger
+}
+
+// SetLogger sets the new logger
+func (engine *Engine) SetLogger(logger interface{}) {
+	var realLogger log.ContextLogger
+	switch t := logger.(type) {
+	case log.ContextLogger:
+		realLogger = t
+	case log.Logger:
+		realLogger = log.NewLoggerAdapter(t)
+	}
+	engine.logger = realLogger
+	engine.DB().Logger = realLogger
+}
+
+// SetLogLevel sets the logger level
+func (engine *Engine) SetLogLevel(level log.LogLevel) {
+	engine.logger.SetLevel(level)
+}
+
+// SetDisableGlobalCache disables the global cache or not
+func (engine *Engine) SetDisableGlobalCache(disable bool) {
+	engine.cacherMgr.SetDisableGlobalCache(disable)
+}
+
+// DriverName returns the current sql driver's name
+func (engine *Engine) DriverName() string {
+	return engine.driverName
+}
+
+// DataSourceName returns the current connection string
+func (engine *Engine) DataSourceName() string {
+	return engine.dataSourceName
+}
+
+// SetMapper sets the name mapping rules
+func (engine *Engine) SetMapper(mapper names.Mapper) {
+	engine.SetTableMapper(mapper)
+	engine.SetColumnMapper(mapper)
+}
+
+// SetTableMapper sets the table name mapping rule
+func (engine *Engine) SetTableMapper(mapper names.Mapper) {
+	engine.tagParser.SetTableMapper(mapper)
+}
+
+// SetColumnMapper sets the column name mapping rule
+func (engine *Engine) SetColumnMapper(mapper names.Mapper) {
+	engine.tagParser.SetColumnMapper(mapper)
+}
+
+// Quote quotes the given sql string with the dialect's quoter
+func (engine *Engine) Quote(value string) string {
+	value = strings.TrimSpace(value)
+	if len(value) == 0 {
+		return value
+	}
+
+	buf := strings.Builder{}
+	engine.QuoteTo(&buf, value)
+
+	return buf.String()
+}
+
+// QuoteTo quotes a string and writes it into the buffer
+func (engine *Engine) QuoteTo(buf *strings.Builder, value string) {
+	if buf == nil {
+		return
+	}
+
+	value = strings.TrimSpace(value)
+	if value == "" {
+		return
+	}
+	engine.dialect.Quoter().QuoteTo(buf, value)
+}
+
+// SQLType is a simple wrapper around the dialect's SQLType method
+func (engine *Engine) SQLType(c *schemas.Column) string {
+	return engine.dialect.SQLType(c)
+}
+
+// AutoIncrStr returns the database's autoincrement statement
+func (engine *Engine) AutoIncrStr() string {
+	return engine.dialect.AutoIncrStr()
+}
+
+// SetConnMaxLifetime sets the maximum amount of time a connection may be reused.
+func (engine *Engine) SetConnMaxLifetime(d time.Duration) {
+	engine.DB().SetConnMaxLifetime(d)
+}
+
+// SetMaxOpenConns is only available for go 1.2+
+func (engine *Engine) SetMaxOpenConns(conns int) {
+	engine.DB().SetMaxOpenConns(conns)
+}
+
+// SetMaxIdleConns sets the max idle connections on the pool, default is 2
+func (engine *Engine) SetMaxIdleConns(conns int) {
+	engine.DB().SetMaxIdleConns(conns)
+}
+
+// SetDefaultCacher sets the default cacher. Xorm does not enable any cacher by default.
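+//
+// A minimal usage sketch (assuming the in-memory LRU cacher shipped with
+// xorm.io/xorm/caches):
+//
+//	engine.SetDefaultCacher(caches.NewLRUCacher(caches.NewMemoryStore(), 1000))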
+func (engine *Engine) SetDefaultCacher(cacher caches.Cacher) {
+	engine.cacherMgr.SetDefaultCacher(cacher)
+}
+
+// GetDefaultCacher returns the default cacher
+func (engine *Engine) GetDefaultCacher() caches.Cacher {
+	return engine.cacherMgr.GetDefaultCacher()
+}
+
+// NoCache If you have set a default cacher and want to temporarily stop using
+// the cache, you can use NoCache()
+func (engine *Engine) NoCache() *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.NoCache()
+}
+
+// NoCascade If you do not want to auto cascade load objects
+func (engine *Engine) NoCascade() *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.NoCascade()
+}
+
+// MapCacher sets a special cacher for a table
+func (engine *Engine) MapCacher(bean interface{}, cacher caches.Cacher) error {
+	engine.SetCacher(dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean, true), cacher)
+	return nil
+}
+
+// NewDB provides an interface to operate the database directly
+func (engine *Engine) NewDB() (*core.DB, error) {
+	return core.Open(engine.driverName, engine.dataSourceName)
+}
+
+// DB returns the wrapper of sql.DB
+func (engine *Engine) DB() *core.DB {
+	return engine.db
+}
+
+// Dialect returns the database dialect
+func (engine *Engine) Dialect() dialects.Dialect {
+	return engine.dialect
+}
+
+// NewSession creates a new session
+func (engine *Engine) NewSession() *Session {
+	return newSession(engine)
+}
+
+// Close the engine
+func (engine *Engine) Close() error {
+	return engine.DB().Close()
+}
+
+// Ping tests if the database is alive
+func (engine *Engine) Ping() error {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Ping()
+}
+
+// SQL lets you manually write raw SQL and operate on it.
+// For example:
+//
+//	engine.SQL("select * from user").Find(&users)
+//
+// This code will execute "select * from user" and set the records to users
+func (engine *Engine) SQL(query interface{}, args ...interface{}) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.SQL(query, args...)
+}
+
+// NoAutoTime By default, if your struct has a "created" or "updated" field tag,
+// the fields will automatically be filled with the current time when Insert or
+// Update is invoked. Call NoAutoTime if you don't want to fill them automatically.
+func (engine *Engine) NoAutoTime() *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.NoAutoTime()
+}
+
+// NoAutoCondition disables or enables auto generating the Where condition from the bean
+func (engine *Engine) NoAutoCondition(no ...bool) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.NoAutoCondition(no...)
+}
+
+func (engine *Engine) loadTableInfo(table *schemas.Table) error {
+	colSeq, cols, err := engine.dialect.GetColumns(engine.db, engine.defaultContext, table.Name)
+	if err != nil {
+		return err
+	}
+	for _, name := range colSeq {
+		table.AddColumn(cols[name])
+	}
+	indexes, err := engine.dialect.GetIndexes(engine.db, engine.defaultContext, table.Name)
+	if err != nil {
+		return err
+	}
+	table.Indexes = indexes
+
+	var seq int
+	for _, index := range indexes {
+		for _, name := range index.Cols {
+			parts := strings.Split(name, " ")
+			if len(parts) > 1 {
+				if parts[1] == "DESC" {
+					seq = 1
+				}
+			}
+			if col := table.GetColumn(parts[0]); col != nil {
+				col.Indexes[index.Name] = index.Type
+			} else {
+				return fmt.Errorf("Unknown col %s seq %d, in index %v of table %v, columns %v", name, seq, index.Name, table.Name, table.ColumnsSeq())
+			}
+		}
+	}
+	return nil
+}
+
+// DBMetas retrieves all tables, columns and indexes' information from the database.
+func (engine *Engine) DBMetas() ([]*schemas.Table, error) {
+	tables, err := engine.dialect.GetTables(engine.db, engine.defaultContext)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, table := range tables {
+		if err = engine.loadTableInfo(table); err != nil {
+			return nil, err
+		}
+	}
+	return tables, nil
+}
+
+// DumpAllToFile dumps all table structs and data of the database to a file
+func (engine *Engine) DumpAllToFile(fp string, tp ...schemas.DBType) error {
+	f, err := os.Create(fp)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	return engine.DumpAll(f, tp...)
+}
+
+// DumpAll dumps all table structs and data of the database to w
+func (engine *Engine) DumpAll(w io.Writer, tp ...schemas.DBType) error {
+	tables, err := engine.DBMetas()
+	if err != nil {
+		return err
+	}
+	return engine.DumpTables(tables, w, tp...)
+}
+
+// DumpTablesToFile dumps the specified tables to a SQL file.
+func (engine *Engine) DumpTablesToFile(tables []*schemas.Table, fp string, tp ...schemas.DBType) error {
+	f, err := os.Create(fp)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	return engine.DumpTables(tables, f, tp...)
+}
+
+// DumpTables dumps the specified tables to the io.Writer
+func (engine *Engine) DumpTables(tables []*schemas.Table, w io.Writer, tp ...schemas.DBType) error {
+	return engine.dumpTables(tables, w, tp...)
+} + +func formatColumnValue(dstDialect dialects.Dialect, d interface{}, col *schemas.Column) string { + if d == nil { + return "NULL" + } + + if dq, ok := d.(bool); ok && (dstDialect.URI().DBType == schemas.SQLITE || + dstDialect.URI().DBType == schemas.MSSQL) { + if dq { + return "1" + } + return "0" + } + + if col.SQLType.IsText() { + var v = fmt.Sprintf("%s", d) + return "'" + strings.Replace(v, "'", "''", -1) + "'" + } else if col.SQLType.IsTime() { + var v = fmt.Sprintf("%s", d) + if strings.HasSuffix(v, " +0000 UTC") { + return fmt.Sprintf("'%s'", v[0:len(v)-len(" +0000 UTC")]) + } else if strings.HasSuffix(v, " +0000 +0000") { + return fmt.Sprintf("'%s'", v[0:len(v)-len(" +0000 +0000")]) + } + return "'" + strings.Replace(v, "'", "''", -1) + "'" + } else if col.SQLType.IsBlob() { + if reflect.TypeOf(d).Kind() == reflect.Slice { + return fmt.Sprintf("%s", dstDialect.FormatBytes(d.([]byte))) + } else if reflect.TypeOf(d).Kind() == reflect.String { + return fmt.Sprintf("'%s'", d.(string)) + } + } else if col.SQLType.IsNumeric() { + switch reflect.TypeOf(d).Kind() { + case reflect.Slice: + if col.SQLType.Name == schemas.Bool { + return fmt.Sprintf("%v", strconv.FormatBool(d.([]byte)[0] != byte('0'))) + } + return fmt.Sprintf("%s", string(d.([]byte))) + case reflect.Int16, reflect.Int8, reflect.Int32, reflect.Int64, reflect.Int: + if col.SQLType.Name == schemas.Bool { + v := reflect.ValueOf(d).Int() > 0 + if dstDialect.URI().DBType == schemas.SQLITE { + if v { + return "1" + } + return "0" + } + return fmt.Sprintf("%v", strconv.FormatBool(v)) + } + return fmt.Sprintf("%v", d) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if col.SQLType.Name == schemas.Bool { + v := reflect.ValueOf(d).Uint() > 0 + if dstDialect.URI().DBType == schemas.SQLITE { + if v { + return "1" + } + return "0" + } + return fmt.Sprintf("%v", strconv.FormatBool(v)) + } + return fmt.Sprintf("%v", d) + default: + return fmt.Sprintf("%v", d) + } + } + + s := fmt.Sprintf("%v", d) + if strings.Contains(s, ":") || strings.Contains(s, "-") { + if strings.HasSuffix(s, " +0000 UTC") { + return fmt.Sprintf("'%s'", s[0:len(s)-len(" +0000 UTC")]) + } + return fmt.Sprintf("'%s'", s) + } + return s +} + +// dumpTables dump database all table structs and data to w with specify db type +func (engine *Engine) dumpTables(tables []*schemas.Table, w io.Writer, tp ...schemas.DBType) error { + var dstDialect dialects.Dialect + if len(tp) == 0 { + dstDialect = engine.dialect + } else { + dstDialect = dialects.QueryDialect(tp[0]) + if dstDialect == nil { + return errors.New("Unsupported database type") + } + + uri := engine.dialect.URI() + destURI := dialects.URI{ + DBType: tp[0], + DBName: uri.DBName, + } + dstDialect.Init(&destURI) + } + + _, err := io.WriteString(w, fmt.Sprintf("/*Generated by xorm %s, from %s to %s*/\n\n", + time.Now().In(engine.TZLocation).Format("2006-01-02 15:04:05"), engine.dialect.URI().DBType, dstDialect.URI().DBType)) + if err != nil { + return err + } + + for i, table := range tables { + tableName := table.Name + if dstDialect.URI().Schema != "" { + tableName = fmt.Sprintf("%s.%s", dstDialect.URI().Schema, table.Name) + } + originalTableName := table.Name + if engine.dialect.URI().Schema != "" { + originalTableName = fmt.Sprintf("%s.%s", engine.dialect.URI().Schema, table.Name) + } + if i > 0 { + _, err = io.WriteString(w, "\n") + if err != nil { + return err + } + } + sqls, _ := dstDialect.CreateTableSQL(table, tableName) + for _, s := range sqls { + _, err = 
io.WriteString(w, s+";\n")
+			if err != nil {
+				return err
+			}
+		}
+		if len(table.PKColumns()) > 0 && dstDialect.URI().DBType == schemas.MSSQL {
+			fmt.Fprintf(w, "SET IDENTITY_INSERT [%s] ON;\n", table.Name)
+		}
+
+		for _, index := range table.Indexes {
+			_, err = io.WriteString(w, dstDialect.CreateIndexSQL(table.Name, index)+";\n")
+			if err != nil {
+				return err
+			}
+		}
+
+		cols := table.ColumnsSeq()
+		colNames := engine.dialect.Quoter().Join(cols, ", ")
+		destColNames := dstDialect.Quoter().Join(cols, ", ")
+
+		rows, err := engine.DB().QueryContext(engine.defaultContext, "SELECT "+colNames+" FROM "+engine.Quote(originalTableName))
+		if err != nil {
+			return err
+		}
+		defer rows.Close()
+
+		for rows.Next() {
+			dest := make([]interface{}, len(cols))
+			err = rows.ScanSlice(&dest)
+			if err != nil {
+				return err
+			}
+
+			_, err = io.WriteString(w, "INSERT INTO "+dstDialect.Quoter().Quote(tableName)+" ("+destColNames+") VALUES (")
+			if err != nil {
+				return err
+			}
+
+			var temp string
+			for i, d := range dest {
+				col := table.GetColumn(cols[i])
+				if col == nil {
+					return errors.New("unknown column error")
+				}
+				temp += "," + formatColumnValue(dstDialect, d, col)
+			}
+			_, err = io.WriteString(w, temp[1:]+");\n")
+			if err != nil {
+				return err
+			}
+		}
+
+		// FIXME: Hack for postgres
+		if dstDialect.URI().DBType == schemas.POSTGRES && table.AutoIncrColumn() != nil {
+			_, err = io.WriteString(w, "SELECT setval('"+tableName+"_id_seq', COALESCE((SELECT MAX("+table.AutoIncrColumn().Name+") + 1 FROM "+dstDialect.Quoter().Quote(tableName)+"), 1), false);\n")
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// Cascade sets whether to use cascade or not
+func (engine *Engine) Cascade(trueOrFalse ...bool) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Cascade(trueOrFalse...)
+}
+
+// Where provides a condition query
+func (engine *Engine) Where(query interface{}, args ...interface{}) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Where(query, args...)
+}
+
+// ID provides a condition as (id) = ?
+func (engine *Engine) ID(id interface{}) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.ID(id)
+}
+
+// Before applies a before Processor; the affected bean is passed to the closure arg
+func (engine *Engine) Before(closures func(interface{})) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Before(closures)
+}
+
+// After applies an after-insert Processor; the affected bean is passed to the closure arg
+func (engine *Engine) After(closures func(interface{})) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.After(closures)
+}
+
+// Charset sets the charset when creating a table; only mysql is supported now
+func (engine *Engine) Charset(charset string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Charset(charset)
+}
+
+// StoreEngine sets the store engine when creating a table; only mysql is supported now
+func (engine *Engine) StoreEngine(storeEngine string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.StoreEngine(storeEngine)
+}
+
+// Distinct is used for distinct columns. Caution: when you are using cache,
+// distinct will not be cached because the cache system needs an id,
+// but distinct will not provide one
+func (engine *Engine) Distinct(columns ...string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Distinct(columns...)
+}
+
+// Select customizes your select columns or contents
+func (engine *Engine) Select(str string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Select(str)
+}
+
+// Cols only uses the given parameters as select or update columns
+func (engine *Engine) Cols(columns ...string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Cols(columns...)
+}
+
+// AllCols indicates that all columns should be used
+func (engine *Engine) AllCols() *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.AllCols()
+}
+
+// MustCols specifies that some columns must be used even if they are empty
+func (engine *Engine) MustCols(columns ...string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.MustCols(columns...)
+}
+
+// UseBool xorm automatically retrieves conditions according to the struct, but
+// if the struct has bool fields, it will ignore them. So use UseBool
+// to tell the system not to ignore them.
+// If there are no parameters, it will use all the bool fields of the struct;
+// otherwise it will use the given columns
+func (engine *Engine) UseBool(columns ...string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.UseBool(columns...)
+}
+
+// Omit excludes the given parameters from the select or update columns
+func (engine *Engine) Omit(columns ...string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Omit(columns...)
+}
+
+// Nullable sets null when the column is zero-value and nullable for update
+func (engine *Engine) Nullable(columns ...string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Nullable(columns...)
+}
+
+// In will generate "column IN (?, ?)"
+func (engine *Engine) In(column string, args ...interface{}) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.In(column, args...)
+}
+
+// NotIn will generate "column NOT IN (?, ?)"
+func (engine *Engine) NotIn(column string, args ...interface{}) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.NotIn(column, args...)
+}
+
+// Incr provides an update string like "column = column + ?"
+func (engine *Engine) Incr(column string, arg ...interface{}) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Incr(column, arg...)
+}
+
+// Decr provides an update string like "column = column - ?"
+func (engine *Engine) Decr(column string, arg ...interface{}) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Decr(column, arg...)
+}
+
+// SetExpr provides an update string like "column = {expression}"
+func (engine *Engine) SetExpr(column string, expression interface{}) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.SetExpr(column, expression)
+}
+
+// Table temporarily changes the Get, Find, Update's table
+func (engine *Engine) Table(tableNameOrBean interface{}) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Table(tableNameOrBean)
+}
+
+// Alias sets the table alias
+func (engine *Engine) Alias(alias string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Alias(alias)
+}
+
+// Limit will generate "LIMIT start, limit"
+func (engine *Engine) Limit(limit int, start ...int) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Limit(limit, start...)
+}
+
+// Desc will generate "ORDER BY column1 DESC, column2 DESC"
+func (engine *Engine) Desc(colNames ...string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Desc(colNames...)
+}
+
+// Asc will generate "ORDER BY column1,column2 Asc".
+// This method can be used in a chain:
+//
+//	engine.Desc("name").Asc("age").Find(&users)
+//	// SELECT * FROM user ORDER BY name DESC, age ASC
+//
+func (engine *Engine) Asc(colNames ...string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Asc(colNames...)
+}
+
+// OrderBy will generate "ORDER BY order"
+func (engine *Engine) OrderBy(order string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.OrderBy(order)
+}
+
+// Prepare enables prepared statements
+func (engine *Engine) Prepare() *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Prepare()
+}
+
+// Join the join_operator should be one of INNER, LEFT OUTER, CROSS etc - this will be prepended to JOIN
+func (engine *Engine) Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Join(joinOperator, tablename, condition, args...)
+}
+
+// GroupBy generates a group by statement
+func (engine *Engine) GroupBy(keys string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.GroupBy(keys)
+}
+
+// Having generates a having statement
+func (engine *Engine) Having(conditions string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Having(conditions)
+}
+
+// Table is a table struct
+type Table struct {
+	*schemas.Table
+	Name string
+}
+
+// IsValid returns true if the table is valid
+func (t *Table) IsValid() bool {
+	return t.Table != nil && len(t.Name) > 0
+}
+
+// TableInfo gets table info according to the bean's content
+func (engine *Engine) TableInfo(bean interface{}) (*schemas.Table, error) {
+	v := utils.ReflectValue(bean)
+	return engine.tagParser.ParseWithCache(v)
+}
+
+// IsTableEmpty checks whether a table has any record
+func (engine *Engine) IsTableEmpty(bean interface{}) (bool, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.IsTableEmpty(bean)
+}
+
+// IsTableExist checks whether a table exists
+func (engine *Engine) IsTableExist(beanOrTableName interface{}) (bool, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.IsTableExist(beanOrTableName)
+}
+
+// TableName returns the table name with schema prefix if it has one
+func (engine *Engine) TableName(bean interface{}, includeSchema ...bool) string {
+	return dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean, includeSchema...)
+}
+
+// CreateIndexes creates indexes
+func (engine *Engine) CreateIndexes(bean interface{}) error {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.CreateIndexes(bean)
+}
+
+// CreateUniques creates unique indexes
+func (engine *Engine) CreateUniques(bean interface{}) error {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.CreateUniques(bean)
+}
+
+// ClearCacheBean clears the cached bean if caching is enabled
+func (engine *Engine) ClearCacheBean(bean interface{}, id string) error {
+	tableName := dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean)
+	cacher := engine.GetCacher(tableName)
+	if cacher != nil {
+		cacher.ClearIds(tableName)
+		cacher.DelBean(tableName, id)
+	}
+	return nil
+}
+
+// ClearCache clears some tables' cache if caching is enabled
+func (engine *Engine) ClearCache(beans ...interface{}) error {
+	for _, bean := range beans {
+		tableName := dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean)
+		cacher := engine.GetCacher(tableName)
+		if cacher != nil {
+			cacher.ClearIds(tableName)
+			cacher.ClearBeans(tableName)
+		}
+	}
+	return nil
+}
+
+// UnMapType removes the table from the tables cache
+func (engine *Engine) UnMapType(t reflect.Type) {
+	engine.tagParser.ClearCacheTable(t)
+}
+
+// Sync syncs new struct changes to the database. This method will automatically add
+// tables, columns, indexes and uniques, but will not delete or change anything.
+// If you change a field, you should change the database manually.
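+//
+// A minimal usage sketch (hypothetical User struct):
+//
+//	type User struct {
+//		Id   int64
+//		Name string `xorm:"varchar(25) unique"`
+//	}
+//	err := engine.Sync(new(User))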
+func (engine *Engine) Sync(beans ...interface{}) error {
+	session := engine.NewSession()
+	defer session.Close()
+
+	for _, bean := range beans {
+		v := utils.ReflectValue(bean)
+		tableNameNoSchema := dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean)
+		table, err := engine.tagParser.ParseWithCache(v)
+		if err != nil {
+			return err
+		}
+
+		isExist, err := session.Table(bean).isTableExist(tableNameNoSchema)
+		if err != nil {
+			return err
+		}
+		if !isExist {
+			err = session.createTable(bean)
+			if err != nil {
+				return err
+			}
+		}
+		/*isEmpty, err := engine.IsEmptyTable(bean)
+		if err != nil {
+			return err
+		}*/
+		var isEmpty bool
+		if isEmpty {
+			err = session.dropTable(bean)
+			if err != nil {
+				return err
+			}
+			err = session.createTable(bean)
+			if err != nil {
+				return err
+			}
+		} else {
+			for _, col := range table.Columns() {
+				isExist, err := engine.dialect.IsColumnExist(engine.db, session.ctx, tableNameNoSchema, col.Name)
+				if err != nil {
+					return err
+				}
+				if !isExist {
+					if err := session.statement.SetRefBean(bean); err != nil {
+						return err
+					}
+					err = session.addColumn(col.Name)
+					if err != nil {
+						return err
+					}
+				}
+			}
+
+			for name, index := range table.Indexes {
+				if err := session.statement.SetRefBean(bean); err != nil {
+					return err
+				}
+				if index.Type == schemas.UniqueType {
+					isExist, err := session.isIndexExist2(tableNameNoSchema, index.Cols, true)
+					if err != nil {
+						return err
+					}
+					if !isExist {
+						if err := session.statement.SetRefBean(bean); err != nil {
+							return err
+						}
+
+						err = session.addUnique(tableNameNoSchema, name)
+						if err != nil {
+							return err
+						}
+					}
+				} else if index.Type == schemas.IndexType {
+					isExist, err := session.isIndexExist2(tableNameNoSchema, index.Cols, false)
+					if err != nil {
+						return err
+					}
+					if !isExist {
+						if err := session.statement.SetRefBean(bean); err != nil {
+							return err
+						}
+
+						err = session.addIndex(tableNameNoSchema, name)
+						if err != nil {
+							return err
+						}
+					}
+				} else {
+					return errors.New("unknown index type")
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// Sync2 synchronizes structs to database tables
+func (engine *Engine) Sync2(beans ...interface{}) error {
+	s := engine.NewSession()
+	defer s.Close()
+	return s.Sync2(beans...)
+}
+
+// CreateTables creates tables according to the beans
+func (engine *Engine) CreateTables(beans ...interface{}) error {
+	session := engine.NewSession()
+	defer session.Close()
+
+	err := session.Begin()
+	if err != nil {
+		return err
+	}
+
+	for _, bean := range beans {
+		err = session.createTable(bean)
+		if err != nil {
+			session.Rollback()
+			return err
+		}
+	}
+	return session.Commit()
+}
+
+// DropTables drops the specified tables
+func (engine *Engine) DropTables(beans ...interface{}) error {
+	session := engine.NewSession()
+	defer session.Close()
+
+	err := session.Begin()
+	if err != nil {
+		return err
+	}
+
+	for _, bean := range beans {
+		err = session.dropTable(bean)
+		if err != nil {
+			session.Rollback()
+			return err
+		}
+	}
+	return session.Commit()
+}
+
+// DropIndexes drops the indexes of a table
+func (engine *Engine) DropIndexes(bean interface{}) error {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.DropIndexes(bean)
+}
+
+// Exec raw sql
+func (engine *Engine) Exec(sqlOrArgs ...interface{}) (sql.Result, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Exec(sqlOrArgs...)
+}
+
+// Query runs a raw sql and returns records as []map[string][]byte
+func (engine *Engine) Query(sqlOrArgs ...interface{}) (resultsSlice []map[string][]byte, err error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Query(sqlOrArgs...)
+}
+
+// QueryString runs a raw sql and returns records as []map[string]string
+func (engine *Engine) QueryString(sqlOrArgs ...interface{}) ([]map[string]string, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.QueryString(sqlOrArgs...)
+}
+
+// QueryInterface runs a raw sql and returns records as []map[string]interface{}
+func (engine *Engine) QueryInterface(sqlOrArgs ...interface{}) ([]map[string]interface{}, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.QueryInterface(sqlOrArgs...)
+}
+
+// Insert one or more records
+func (engine *Engine) Insert(beans ...interface{}) (int64, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Insert(beans...)
+}
+
+// InsertOne inserts only one record
+func (engine *Engine) InsertOne(bean interface{}) (int64, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.InsertOne(bean)
+}
+
+// Update records; the bean's non-empty fields are the updated contents and
+// condiBean's non-empty fields are conditions.
+// CAUTION:
+// 1. bool fields will by default be treated as updated content, not as conditions.
+// You should call UseBool if you have bool fields to use.
+// 2. float32 & float64 may be inexact when used as conditions
+func (engine *Engine) Update(bean interface{}, condiBeans ...interface{}) (int64, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Update(bean, condiBeans...)
+}
+
+// Delete records; the bean's non-empty fields are conditions
+func (engine *Engine) Delete(bean interface{}) (int64, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Delete(bean)
+}
+
+// Get retrieves one record from the table; the bean's non-empty fields
+// are conditions
+func (engine *Engine) Get(bean interface{}) (bool, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Get(bean)
+}
+
+// Exist returns true if the record exists, otherwise returns false
+func (engine *Engine) Exist(bean ...interface{}) (bool, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Exist(bean...)
+}
+
+// Find retrieves records from the table; condiBeans's non-empty fields
+// are conditions. beans could be []Struct, []*Struct, map[int64]Struct or
+// map[int64]*Struct
+func (engine *Engine) Find(beans interface{}, condiBeans ...interface{}) error {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Find(beans, condiBeans...)
+}
+
+// FindAndCount finds the results and also returns the counts
+func (engine *Engine) FindAndCount(rowsSlicePtr interface{}, condiBean ...interface{}) (int64, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.FindAndCount(rowsSlicePtr, condiBean...)
+}
+
+// Iterate handles records from the table record by record; the bean's
+// non-empty fields are conditions.
+func (engine *Engine) Iterate(bean interface{}, fun IterFunc) error {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Iterate(bean, fun)
+}
+
+// Rows returns a sql.Rows-compatible Rows object, as a forward iterator for iterating
+// record by record; the bean's non-empty fields are conditions.
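+//
+// A usage sketch mirroring the package documentation (hypothetical User struct):
+//
+//	rows, err := engine.Rows(new(User))
+//	if err == nil {
+//		defer rows.Close()
+//		bean := new(User)
+//		for rows.Next() {
+//			err = rows.Scan(bean)
+//		}
+//	}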
+func (engine *Engine) Rows(bean interface{}) (*Rows, error) { + session := engine.NewSession() + return session.Rows(bean) +} + +// Count counts the records. bean's non-empty fields are conditions. +func (engine *Engine) Count(bean ...interface{}) (int64, error) { + session := engine.NewSession() + defer session.Close() + return session.Count(bean...) +} + +// Sum sum the records by some column. bean's non-empty fields are conditions. +func (engine *Engine) Sum(bean interface{}, colName string) (float64, error) { + session := engine.NewSession() + defer session.Close() + return session.Sum(bean, colName) +} + +// SumInt sum the records by some column. bean's non-empty fields are conditions. +func (engine *Engine) SumInt(bean interface{}, colName string) (int64, error) { + session := engine.NewSession() + defer session.Close() + return session.SumInt(bean, colName) +} + +// Sums sum the records by some columns. bean's non-empty fields are conditions. +func (engine *Engine) Sums(bean interface{}, colNames ...string) ([]float64, error) { + session := engine.NewSession() + defer session.Close() + return session.Sums(bean, colNames...) +} + +// SumsInt like Sums but return slice of int64 instead of float64. +func (engine *Engine) SumsInt(bean interface{}, colNames ...string) ([]int64, error) { + session := engine.NewSession() + defer session.Close() + return session.SumsInt(bean, colNames...) +} + +// ImportFile SQL DDL file +func (engine *Engine) ImportFile(ddlPath string) ([]sql.Result, error) { + session := engine.NewSession() + defer session.Close() + return session.ImportFile(ddlPath) +} + +// Import SQL DDL from io.Reader +func (engine *Engine) Import(r io.Reader) ([]sql.Result, error) { + session := engine.NewSession() + defer session.Close() + return session.Import(r) +} + +// nowTime return current time +func (engine *Engine) nowTime(col *schemas.Column) (interface{}, time.Time) { + t := time.Now() + var tz = engine.DatabaseTZ + if !col.DisableTimeZone && col.TimeZone != nil { + tz = col.TimeZone + } + return dialects.FormatTime(engine.dialect, col.SQLType.Name, t.In(tz)), t.In(engine.TZLocation) +} + +// GetColumnMapper returns the column name mapper +func (engine *Engine) GetColumnMapper() names.Mapper { + return engine.tagParser.GetColumnMapper() +} + +// GetTableMapper returns the table name mapper +func (engine *Engine) GetTableMapper() names.Mapper { + return engine.tagParser.GetTableMapper() +} + +// GetTZLocation returns time zone of the application +func (engine *Engine) GetTZLocation() *time.Location { + return engine.TZLocation +} + +// SetTZLocation sets time zone of the application +func (engine *Engine) SetTZLocation(tz *time.Location) { + engine.TZLocation = tz +} + +// GetTZDatabase returns time zone of the database +func (engine *Engine) GetTZDatabase() *time.Location { + return engine.DatabaseTZ +} + +// SetTZDatabase sets time zone of the database +func (engine *Engine) SetTZDatabase(tz *time.Location) { + engine.DatabaseTZ = tz +} + +// SetSchema sets the schema of database +func (engine *Engine) SetSchema(schema string) { + engine.dialect.URI().SetSchema(schema) +} + +func (engine *Engine) AddHook(hook contexts.Hook) { + engine.db.AddHook(hook) +} + +// Unscoped always disable struct tag "deleted" +func (engine *Engine) Unscoped() *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Unscoped() +} + +func (engine *Engine) tbNameWithSchema(v string) string { + return dialects.TableNameWithSchema(engine.dialect, v) +} + +// 
ContextHook creates a session with the context +func (engine *Engine) Context(ctx context.Context) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Context(ctx) +} + +// SetDefaultContext set the default context +func (engine *Engine) SetDefaultContext(ctx context.Context) { + engine.defaultContext = ctx +} + +// PingContext tests if database is alive +func (engine *Engine) PingContext(ctx context.Context) error { + session := engine.NewSession() + defer session.Close() + return session.PingContext(ctx) +} + +// Transaction Execute sql wrapped in a transaction(abbr as tx), tx will automatic commit if no errors occurred +func (engine *Engine) Transaction(f func(*Session) (interface{}, error)) (interface{}, error) { + session := engine.NewSession() + defer session.Close() + + if err := session.Begin(); err != nil { + return nil, err + } + + result, err := f(session) + if err != nil { + return result, err + } + + if err := session.Commit(); err != nil { + return result, err + } + + return result, nil +} diff --git a/vendor/xorm.io/xorm/engine_group.go b/vendor/xorm.io/xorm/engine_group.go new file mode 100644 index 0000000..cdd9dd4 --- /dev/null +++ b/vendor/xorm.io/xorm/engine_group.go @@ -0,0 +1,230 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "context" + "time" + + "xorm.io/xorm/caches" + "xorm.io/xorm/contexts" + "xorm.io/xorm/dialects" + "xorm.io/xorm/log" + "xorm.io/xorm/names" +) + +// EngineGroup defines an engine group +type EngineGroup struct { + *Engine + slaves []*Engine + policy GroupPolicy +} + +// NewEngineGroup creates a new engine group +func NewEngineGroup(args1 interface{}, args2 interface{}, policies ...GroupPolicy) (*EngineGroup, error) { + var eg EngineGroup + if len(policies) > 0 { + eg.policy = policies[0] + } else { + eg.policy = RoundRobinPolicy() + } + + driverName, ok1 := args1.(string) + conns, ok2 := args2.([]string) + if ok1 && ok2 { + engines := make([]*Engine, len(conns)) + for i, conn := range conns { + engine, err := NewEngine(driverName, conn) + if err != nil { + return nil, err + } + engine.engineGroup = &eg + engines[i] = engine + } + + eg.Engine = engines[0] + eg.slaves = engines[1:] + return &eg, nil + } + + master, ok3 := args1.(*Engine) + slaves, ok4 := args2.([]*Engine) + if ok3 && ok4 { + master.engineGroup = &eg + for i := 0; i < len(slaves); i++ { + slaves[i].engineGroup = &eg + } + eg.Engine = master + eg.slaves = slaves + return &eg, nil + } + return nil, ErrParamsType +} + +// Close the engine +func (eg *EngineGroup) Close() error { + err := eg.Engine.Close() + if err != nil { + return err + } + + for i := 0; i < len(eg.slaves); i++ { + err := eg.slaves[i].Close() + if err != nil { + return err + } + } + return nil +} + +// ContextHook returned a group session +func (eg *EngineGroup) Context(ctx context.Context) *Session { + sess := eg.NewSession() + sess.isAutoClose = true + return sess.Context(ctx) +} + +// NewSession returned a group session +func (eg *EngineGroup) NewSession() *Session { + sess := eg.Engine.NewSession() + sess.sessionType = groupSession + return sess +} + +// Master returns the master engine +func (eg *EngineGroup) Master() *Engine { + return eg.Engine +} + +// Ping tests if database is alive +func (eg *EngineGroup) Ping() error { + if err := eg.Engine.Ping(); err != nil { + return err + } + + for _, slave := range eg.slaves { + if 
err := slave.Ping(); err != nil { + return err + } + } + return nil +} + +// SetColumnMapper set the column name mapping rule +func (eg *EngineGroup) SetColumnMapper(mapper names.Mapper) { + eg.Engine.SetColumnMapper(mapper) + for i := 0; i < len(eg.slaves); i++ { + eg.slaves[i].SetColumnMapper(mapper) + } +} + +// SetConnMaxLifetime sets the maximum amount of time a connection may be reused. +func (eg *EngineGroup) SetConnMaxLifetime(d time.Duration) { + eg.Engine.SetConnMaxLifetime(d) + for i := 0; i < len(eg.slaves); i++ { + eg.slaves[i].SetConnMaxLifetime(d) + } +} + +// SetDefaultCacher set the default cacher +func (eg *EngineGroup) SetDefaultCacher(cacher caches.Cacher) { + eg.Engine.SetDefaultCacher(cacher) + for i := 0; i < len(eg.slaves); i++ { + eg.slaves[i].SetDefaultCacher(cacher) + } +} + +// SetLogger set the new logger +func (eg *EngineGroup) SetLogger(logger interface{}) { + eg.Engine.SetLogger(logger) + for i := 0; i < len(eg.slaves); i++ { + eg.slaves[i].SetLogger(logger) + } +} + +func (eg *EngineGroup) AddHook(hook contexts.Hook) { + eg.Engine.AddHook(hook) + for i := 0; i < len(eg.slaves); i++ { + eg.slaves[i].AddHook(hook) + } +} + +// SetLogLevel sets the logger level +func (eg *EngineGroup) SetLogLevel(level log.LogLevel) { + eg.Engine.SetLogLevel(level) + for i := 0; i < len(eg.slaves); i++ { + eg.slaves[i].SetLogLevel(level) + } +} + +// SetMapper set the name mapping rules +func (eg *EngineGroup) SetMapper(mapper names.Mapper) { + eg.Engine.SetMapper(mapper) + for i := 0; i < len(eg.slaves); i++ { + eg.slaves[i].SetMapper(mapper) + } +} + +// SetMaxIdleConns set the max idle connections on pool, default is 2 +func (eg *EngineGroup) SetMaxIdleConns(conns int) { + eg.Engine.DB().SetMaxIdleConns(conns) + for i := 0; i < len(eg.slaves); i++ { + eg.slaves[i].DB().SetMaxIdleConns(conns) + } +} + +// SetMaxOpenConns is only available for go 1.2+ +func (eg *EngineGroup) SetMaxOpenConns(conns int) { + eg.Engine.DB().SetMaxOpenConns(conns) + for i := 0; i < len(eg.slaves); i++ { + eg.slaves[i].DB().SetMaxOpenConns(conns) + } +} + +// SetPolicy set the group policy +func (eg *EngineGroup) SetPolicy(policy GroupPolicy) *EngineGroup { + eg.policy = policy + return eg +} + +// SetQuotePolicy sets the special quote policy +func (eg *EngineGroup) SetQuotePolicy(quotePolicy dialects.QuotePolicy) { + eg.Engine.SetQuotePolicy(quotePolicy) + for i := 0; i < len(eg.slaves); i++ { + eg.slaves[i].SetQuotePolicy(quotePolicy) + } +} + +// SetTableMapper set the table name mapping rule +func (eg *EngineGroup) SetTableMapper(mapper names.Mapper) { + eg.Engine.SetTableMapper(mapper) + for i := 0; i < len(eg.slaves); i++ { + eg.slaves[i].SetTableMapper(mapper) + } +} + +// ShowSQL show SQL statement or not on logger if log level is great than INFO +func (eg *EngineGroup) ShowSQL(show ...bool) { + eg.Engine.ShowSQL(show...) + for i := 0; i < len(eg.slaves); i++ { + eg.slaves[i].ShowSQL(show...) 
+ } +} + +// Slave returns one of the physical databases which is a slave according the policy +func (eg *EngineGroup) Slave() *Engine { + switch len(eg.slaves) { + case 0: + return eg.Engine + case 1: + return eg.slaves[0] + } + return eg.policy.Slave(eg) +} + +// Slaves returns all the slaves +func (eg *EngineGroup) Slaves() []*Engine { + return eg.slaves +} diff --git a/vendor/xorm.io/xorm/engine_group_policy.go b/vendor/xorm.io/xorm/engine_group_policy.go new file mode 100644 index 0000000..1def8ce --- /dev/null +++ b/vendor/xorm.io/xorm/engine_group_policy.go @@ -0,0 +1,118 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "math/rand" + "sync" + "time" +) + +// GroupPolicy is be used by chosing the current slave from slaves +type GroupPolicy interface { + Slave(*EngineGroup) *Engine +} + +// GroupPolicyHandler should be used when a function is a GroupPolicy +type GroupPolicyHandler func(*EngineGroup) *Engine + +// Slave implements the chosen of slaves +func (h GroupPolicyHandler) Slave(eg *EngineGroup) *Engine { + return h(eg) +} + +// RandomPolicy implmentes randomly chose the slave of slaves +func RandomPolicy() GroupPolicyHandler { + var r = rand.New(rand.NewSource(time.Now().UnixNano())) + return func(g *EngineGroup) *Engine { + return g.Slaves()[r.Intn(len(g.Slaves()))] + } +} + +// WeightRandomPolicy implmentes randomly chose the slave of slaves +func WeightRandomPolicy(weights []int) GroupPolicyHandler { + var rands = make([]int, 0, len(weights)) + for i := 0; i < len(weights); i++ { + for n := 0; n < weights[i]; n++ { + rands = append(rands, i) + } + } + var r = rand.New(rand.NewSource(time.Now().UnixNano())) + + return func(g *EngineGroup) *Engine { + var slaves = g.Slaves() + idx := rands[r.Intn(len(rands))] + if idx >= len(slaves) { + idx = len(slaves) - 1 + } + return slaves[idx] + } +} + +// RoundRobinPolicy returns a group policy handler +func RoundRobinPolicy() GroupPolicyHandler { + var pos = -1 + var lock sync.Mutex + return func(g *EngineGroup) *Engine { + var slaves = g.Slaves() + + lock.Lock() + defer lock.Unlock() + pos++ + if pos >= len(slaves) { + pos = 0 + } + + return slaves[pos] + } +} + +// WeightRoundRobinPolicy returns a group policy handler +func WeightRoundRobinPolicy(weights []int) GroupPolicyHandler { + var rands = make([]int, 0, len(weights)) + for i := 0; i < len(weights); i++ { + for n := 0; n < weights[i]; n++ { + rands = append(rands, i) + } + } + var pos = -1 + var lock sync.Mutex + + return func(g *EngineGroup) *Engine { + var slaves = g.Slaves() + lock.Lock() + defer lock.Unlock() + pos++ + if pos >= len(rands) { + pos = 0 + } + + idx := rands[pos] + if idx >= len(slaves) { + idx = len(slaves) - 1 + } + return slaves[idx] + } +} + +// LeastConnPolicy implements GroupPolicy, every time will get the least connections slave +func LeastConnPolicy() GroupPolicyHandler { + return func(g *EngineGroup) *Engine { + var slaves = g.Slaves() + connections := 0 + idx := 0 + for i := 0; i < len(slaves); i++ { + openConnections := slaves[i].DB().Stats().OpenConnections + if i == 0 { + connections = openConnections + idx = i + } else if openConnections <= connections { + connections = openConnections + idx = i + } + } + return slaves[idx] + } +} diff --git a/vendor/xorm.io/xorm/error.go b/vendor/xorm.io/xorm/error.go new file mode 100644 index 0000000..cfa5c81 --- /dev/null +++ b/vendor/xorm.io/xorm/error.go @@ -0,0 
+1,26 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "errors" +) + +var ( + // ErrPtrSliceType represents a type error + ErrPtrSliceType = errors.New("A point to a slice is needed") + // ErrParamsType params error + ErrParamsType = errors.New("Params type error") + // ErrTableNotFound table not found error + ErrTableNotFound = errors.New("Table not found") + // ErrUnSupportedType unsupported error + ErrUnSupportedType = errors.New("Unsupported type error") + // ErrNotExist record does not exist error + ErrNotExist = errors.New("Record does not exist") + // ErrCacheFailed cache failed error + ErrCacheFailed = errors.New("Cache failed") + // ErrConditionType condition type unsupported + ErrConditionType = errors.New("Unsupported condition type") +) diff --git a/vendor/xorm.io/xorm/go.mod b/vendor/xorm.io/xorm/go.mod new file mode 100644 index 0000000..e0d22a2 --- /dev/null +++ b/vendor/xorm.io/xorm/go.mod @@ -0,0 +1,14 @@ +module xorm.io/xorm + +go 1.11 + +require ( + github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc + github.com/go-sql-driver/mysql v1.5.0 + github.com/lib/pq v1.7.0 + github.com/mattn/go-sqlite3 v1.14.0 + github.com/stretchr/testify v1.4.0 + github.com/syndtr/goleveldb v1.0.0 + github.com/ziutek/mymysql v1.5.4 + xorm.io/builder v0.3.7 +) diff --git a/vendor/xorm.io/xorm/go.sum b/vendor/xorm.io/xorm/go.sum new file mode 100644 index 0000000..844dd09 --- /dev/null +++ b/vendor/xorm.io/xorm/go.sum @@ -0,0 +1,70 @@ +gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:lSA0F4e9A2NcQSqGqTOXqu2aRi/XEQxDCBwM8yJtE6s= +gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:EXuID2Zs0pAQhH8yz+DNjUbjppKQzKFAn28TMYPB6IU= +github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc= +github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc h1:VRRKCwnzqk8QCaRC4os14xoKDdbHqqlJtJA0oc1ZAjg= +github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod 
h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/lib/pq v1.7.0 h1:h93mCPfUSkaul3Ka/VG8uZdmW1uMHDGxzu0NWHuJmHY= +github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mattn/go-sqlite3 v1.14.0 h1:mLyGNKR8+Vv9CAU7PphKa2hkEqxxhn8i32J6FPj1/QA= +github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= +github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c h1:Vj5n4GlwjmQteupaxJ9+0FNOmBrHfq7vN4btdGoDZgI= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +xorm.io/builder v0.3.7 h1:2pETdKRK+2QG4mLX4oODHEhn5Z8j1m8sXa7jfu+/SZI= +xorm.io/builder v0.3.7/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE= diff --git a/vendor/xorm.io/xorm/interface.go b/vendor/xorm.io/xorm/interface.go new file mode 100644 index 0000000..0fe9cbe --- /dev/null +++ b/vendor/xorm.io/xorm/interface.go @@ -0,0 +1,130 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "context" + "database/sql" + "reflect" + "time" + + "xorm.io/xorm/caches" + "xorm.io/xorm/contexts" + "xorm.io/xorm/dialects" + "xorm.io/xorm/log" + "xorm.io/xorm/names" + "xorm.io/xorm/schemas" +) + +// Interface defines the interface which Engine, EngineGroup and Session will implementate. 
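Since *Engine, *EngineGroup and *Session all satisfy the Interface declared just below, data-access helpers can be written once and reused both on a bare engine and inside a session or transaction. A minimal sketch; the User type, the SQLite driver, and the DSN are assumptions for illustration, not part of this patch:

package main

import (
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
	"xorm.io/xorm"
)

type User struct {
	Id   int64
	Name string
}

// countUsers accepts xorm.Interface, so it runs equally well on a
// *xorm.Engine and on a *xorm.Session that is part of a transaction.
func countUsers(db xorm.Interface) (int64, error) {
	return db.Count(new(User))
}

func main() {
	engine, err := xorm.NewEngine("sqlite3", "file::memory:?cache=shared")
	if err != nil {
		log.Fatal(err)
	}
	defer engine.Close()
	if err := engine.Sync2(new(User)); err != nil {
		log.Fatal(err)
	}

	n, err := countUsers(engine) // directly on the engine
	if err != nil {
		log.Fatal(err)
	}

	sess := engine.NewSession() // same helper through a session
	defer sess.Close()
	n2, _ := countUsers(sess)
	fmt.Println(n, n2)
}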
+type Interface interface {
+	AllCols() *Session
+	Alias(alias string) *Session
+	Asc(colNames ...string) *Session
+	BufferSize(size int) *Session
+	Cols(columns ...string) *Session
+	Count(...interface{}) (int64, error)
+	CreateIndexes(bean interface{}) error
+	CreateUniques(bean interface{}) error
+	Decr(column string, arg ...interface{}) *Session
+	Desc(...string) *Session
+	Delete(interface{}) (int64, error)
+	Distinct(columns ...string) *Session
+	DropIndexes(bean interface{}) error
+	Exec(sqlOrArgs ...interface{}) (sql.Result, error)
+	Exist(bean ...interface{}) (bool, error)
+	Find(interface{}, ...interface{}) error
+	FindAndCount(interface{}, ...interface{}) (int64, error)
+	Get(interface{}) (bool, error)
+	GroupBy(keys string) *Session
+	ID(interface{}) *Session
+	In(string, ...interface{}) *Session
+	Incr(column string, arg ...interface{}) *Session
+	Insert(...interface{}) (int64, error)
+	InsertOne(interface{}) (int64, error)
+	IsTableEmpty(bean interface{}) (bool, error)
+	IsTableExist(beanOrTableName interface{}) (bool, error)
+	Iterate(interface{}, IterFunc) error
+	Limit(int, ...int) *Session
+	MustCols(columns ...string) *Session
+	NoAutoCondition(...bool) *Session
+	NotIn(string, ...interface{}) *Session
+	Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session
+	Omit(columns ...string) *Session
+	OrderBy(order string) *Session
+	Ping() error
+	Query(sqlOrArgs ...interface{}) (resultsSlice []map[string][]byte, err error)
+	QueryInterface(sqlOrArgs ...interface{}) ([]map[string]interface{}, error)
+	QueryString(sqlOrArgs ...interface{}) ([]map[string]string, error)
+	Rows(bean interface{}) (*Rows, error)
+	SetExpr(string, interface{}) *Session
+	Select(string) *Session
+	SQL(interface{}, ...interface{}) *Session
+	Sum(bean interface{}, colName string) (float64, error)
+	SumInt(bean interface{}, colName string) (int64, error)
+	Sums(bean interface{}, colNames ...string) ([]float64, error)
+	SumsInt(bean interface{}, colNames ...string) ([]int64, error)
+	Table(tableNameOrBean interface{}) *Session
+	Unscoped() *Session
+	Update(bean interface{}, condiBeans ...interface{}) (int64, error)
+	UseBool(...string) *Session
+	Where(interface{}, ...interface{}) *Session
+}
+
+// EngineInterface defines the interface which Engine and EngineGroup will implement.
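Engine-level setup can likewise be written once against the EngineInterface declared just below and applied to a single engine or to a whole master/slave group. A sketch under the same assumptions as above (the User type and driver are illustrative):

package main

import (
	"log"

	_ "github.com/mattn/go-sqlite3"
	"xorm.io/xorm"
	"xorm.io/xorm/names"
)

type User struct {
	Id   int64
	Name string
}

// configure works for *xorm.Engine and *xorm.EngineGroup alike.
func configure(e xorm.EngineInterface) error {
	e.SetMapper(names.GonicMapper{}) // Gonic-style struct/column name mapping
	e.ShowSQL(true)                  // log the generated statements
	return e.Sync2(new(User))        // create/alter tables to match the structs
}

func main() {
	engine, err := xorm.NewEngine("sqlite3", "file::memory:?cache=shared")
	if err != nil {
		log.Fatal(err)
	}
	defer engine.Close()
	if err := configure(engine); err != nil {
		log.Fatal(err)
	}
}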
+type EngineInterface interface { + Interface + + Before(func(interface{})) *Session + Charset(charset string) *Session + ClearCache(...interface{}) error + Context(context.Context) *Session + CreateTables(...interface{}) error + DBMetas() ([]*schemas.Table, error) + Dialect() dialects.Dialect + DriverName() string + DropTables(...interface{}) error + DumpAllToFile(fp string, tp ...schemas.DBType) error + GetCacher(string) caches.Cacher + GetColumnMapper() names.Mapper + GetDefaultCacher() caches.Cacher + GetTableMapper() names.Mapper + GetTZDatabase() *time.Location + GetTZLocation() *time.Location + ImportFile(fp string) ([]sql.Result, error) + MapCacher(interface{}, caches.Cacher) error + NewSession() *Session + NoAutoTime() *Session + Quote(string) string + SetCacher(string, caches.Cacher) + SetConnMaxLifetime(time.Duration) + SetColumnMapper(names.Mapper) + SetDefaultCacher(caches.Cacher) + SetLogger(logger interface{}) + SetLogLevel(log.LogLevel) + SetMapper(names.Mapper) + SetMaxOpenConns(int) + SetMaxIdleConns(int) + SetQuotePolicy(dialects.QuotePolicy) + SetSchema(string) + SetTableMapper(names.Mapper) + SetTZDatabase(tz *time.Location) + SetTZLocation(tz *time.Location) + AddHook(hook contexts.Hook) + ShowSQL(show ...bool) + Sync(...interface{}) error + Sync2(...interface{}) error + StoreEngine(storeEngine string) *Session + TableInfo(bean interface{}) (*schemas.Table, error) + TableName(interface{}, ...bool) string + UnMapType(reflect.Type) + EnableSessionID(bool) +} + +var ( + _ Interface = &Session{} + _ EngineInterface = &Engine{} + _ EngineInterface = &EngineGroup{} +) diff --git a/vendor/xorm.io/xorm/internal/json/json.go b/vendor/xorm.io/xorm/internal/json/json.go new file mode 100644 index 0000000..c9a2eb4 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/json/json.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import "encoding/json" + +// JSONInterface represents an interface to handle json data +type JSONInterface interface { + Marshal(v interface{}) ([]byte, error) + Unmarshal(data []byte, v interface{}) error +} + +var ( + // DefaultJSONHandler default json handler + DefaultJSONHandler JSONInterface = StdJSON{} +) + +// StdJSON implements JSONInterface via encoding/json +type StdJSON struct{} + +// Marshal implements JSONInterface +func (StdJSON) Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// Unmarshal implements JSONInterface +func (StdJSON) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} diff --git a/vendor/xorm.io/xorm/internal/statements/cache.go b/vendor/xorm.io/xorm/internal/statements/cache.go new file mode 100644 index 0000000..cb33df0 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/cache.go @@ -0,0 +1,79 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
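The internal/json package above gives xorm a single seam for the JSON (de)serialization of struct fields. Because it lives under internal/ it cannot be imported from outside the xorm.io/xorm module, so the following standalone sketch only mirrors the pattern it implements; all names here are illustrative, not xorm API:

package main

import (
	"encoding/json"
	"fmt"
)

// JSONHandler mirrors the JSONInterface seam above: any
// marshal/unmarshal pair can be plugged in behind one variable.
type JSONHandler interface {
	Marshal(v interface{}) ([]byte, error)
	Unmarshal(data []byte, v interface{}) error
}

type stdJSON struct{}

func (stdJSON) Marshal(v interface{}) ([]byte, error)   { return json.Marshal(v) }
func (stdJSON) Unmarshal(d []byte, v interface{}) error { return json.Unmarshal(d, v) }

// defaultHandler plays the role of DefaultJSONHandler: swap it for a
// faster implementation (e.g. jsoniter) without touching call sites.
var defaultHandler JSONHandler = stdJSON{}

func main() {
	b, _ := defaultHandler.Marshal(map[string]int{"a": 1})
	fmt.Println(string(b)) // {"a":1}
}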
+ +package statements + +import ( + "fmt" + "strings" + + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" +) + +func (statement *Statement) ConvertIDSQL(sqlStr string) string { + if statement.RefTable != nil { + cols := statement.RefTable.PKColumns() + if len(cols) == 0 { + return "" + } + + colstrs := statement.joinColumns(cols, false) + sqls := utils.SplitNNoCase(sqlStr, " from ", 2) + if len(sqls) != 2 { + return "" + } + + var top string + pLimitN := statement.LimitN + if pLimitN != nil && statement.dialect.URI().DBType == schemas.MSSQL { + top = fmt.Sprintf("TOP %d ", *pLimitN) + } + + newsql := fmt.Sprintf("SELECT %s%s FROM %v", top, colstrs, sqls[1]) + return newsql + } + return "" +} + +func (statement *Statement) ConvertUpdateSQL(sqlStr string) (string, string) { + if statement.RefTable == nil || len(statement.RefTable.PrimaryKeys) != 1 { + return "", "" + } + + colstrs := statement.joinColumns(statement.RefTable.PKColumns(), true) + sqls := utils.SplitNNoCase(sqlStr, "where", 2) + if len(sqls) != 2 { + if len(sqls) == 1 { + return sqls[0], fmt.Sprintf("SELECT %v FROM %v", + colstrs, statement.quote(statement.TableName())) + } + return "", "" + } + + var whereStr = sqls[1] + + // TODO: for postgres only, if any other database? + var paraStr string + if statement.dialect.URI().DBType == schemas.POSTGRES { + paraStr = "$" + } else if statement.dialect.URI().DBType == schemas.MSSQL { + paraStr = ":" + } + + if paraStr != "" { + if strings.Contains(sqls[1], paraStr) { + dollers := strings.Split(sqls[1], paraStr) + whereStr = dollers[0] + for i, c := range dollers[1:] { + ccs := strings.SplitN(c, " ", 2) + whereStr += fmt.Sprintf(paraStr+"%v %v", i+1, ccs[1]) + } + } + } + + return sqls[0], fmt.Sprintf("SELECT %v FROM %v WHERE %v", + colstrs, statement.quote(statement.TableName()), + whereStr) +} diff --git a/vendor/xorm.io/xorm/internal/statements/column_map.go b/vendor/xorm.io/xorm/internal/statements/column_map.go new file mode 100644 index 0000000..bb764b4 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/column_map.go @@ -0,0 +1,66 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package statements + +import ( + "strings" + + "xorm.io/xorm/schemas" +) + +type columnMap []string + +func (m columnMap) Contain(colName string) bool { + if len(m) == 0 { + return false + } + + n := len(colName) + for _, mk := range m { + if len(mk) != n { + continue + } + if strings.EqualFold(mk, colName) { + return true + } + } + + return false +} + +func (m columnMap) Len() int { + return len(m) +} + +func (m columnMap) IsEmpty() bool { + return len(m) == 0 +} + +func (m *columnMap) Add(colName string) bool { + if m.Contain(colName) { + return false + } + *m = append(*m, colName) + return true +} + +func getFlagForColumn(m map[string]bool, col *schemas.Column) (val bool, has bool) { + if len(m) == 0 { + return false, false + } + + n := len(col.Name) + + for mk := range m { + if len(mk) != n { + continue + } + if strings.EqualFold(mk, col.Name) { + return m[mk], true + } + } + + return false, false +} diff --git a/vendor/xorm.io/xorm/internal/statements/expr_param.go b/vendor/xorm.io/xorm/internal/statements/expr_param.go new file mode 100644 index 0000000..6657408 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/expr_param.go @@ -0,0 +1,126 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package statements + +import ( + "fmt" + "strings" + + "xorm.io/builder" + "xorm.io/xorm/schemas" +) + +type ErrUnsupportedExprType struct { + tp string +} + +func (err ErrUnsupportedExprType) Error() string { + return fmt.Sprintf("Unsupported expression type: %v", err.tp) +} + +type exprParam struct { + colName string + arg interface{} +} + +type exprParams struct { + ColNames []string + Args []interface{} +} + +func (exprs *exprParams) Len() int { + return len(exprs.ColNames) +} + +func (exprs *exprParams) addParam(colName string, arg interface{}) { + exprs.ColNames = append(exprs.ColNames, colName) + exprs.Args = append(exprs.Args, arg) +} + +func (exprs *exprParams) IsColExist(colName string) bool { + for _, name := range exprs.ColNames { + if strings.EqualFold(schemas.CommonQuoter.Trim(name), schemas.CommonQuoter.Trim(colName)) { + return true + } + } + return false +} + +func (exprs *exprParams) getByName(colName string) (exprParam, bool) { + for i, name := range exprs.ColNames { + if strings.EqualFold(name, colName) { + return exprParam{name, exprs.Args[i]}, true + } + } + return exprParam{}, false +} + +func (exprs *exprParams) WriteArgs(w *builder.BytesWriter) error { + for i, expr := range exprs.Args { + switch arg := expr.(type) { + case *builder.Builder: + if _, err := w.WriteString("("); err != nil { + return err + } + if err := arg.WriteTo(w); err != nil { + return err + } + if _, err := w.WriteString(")"); err != nil { + return err + } + case string: + if arg == "" { + arg = "''" + } + if _, err := w.WriteString(fmt.Sprintf("%v", arg)); err != nil { + return err + } + default: + if _, err := w.WriteString("?"); err != nil { + return err + } + w.Append(arg) + } + if i != len(exprs.Args)-1 { + if _, err := w.WriteString(","); err != nil { + return err + } + } + } + return nil +} + +func (exprs *exprParams) writeNameArgs(w *builder.BytesWriter) error { + for i, colName := range exprs.ColNames { + if _, err := w.WriteString(colName); err != nil { + return err + } + if _, err := w.WriteString("="); err != nil { + return err + } + + switch arg := exprs.Args[i].(type) { + case *builder.Builder: + if _, err := w.WriteString("("); err != nil { + return err + } + if err := arg.WriteTo(w); err != nil { + return err + } + if _, err := w.WriteString("("); err != nil { + return err + } + default: + w.Append(exprs.Args[i]) + } + + if i+1 != len(exprs.ColNames) { + if _, err := w.WriteString(","); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/xorm.io/xorm/internal/statements/insert.go b/vendor/xorm.io/xorm/internal/statements/insert.go new file mode 100644 index 0000000..6cbbbed --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/insert.go @@ -0,0 +1,207 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
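GenInsertSQL below has to paper over several dialect quirks: MySQL accepts an empty VALUES () list where other databases need DEFAULT VALUES, SQL Server returns auto-increment values via an OUTPUT Inserted.<col> clause, and Postgres via a trailing RETURNING clause. A standalone sketch of just those branches; the dbType strings stand in for the schemas.DBType constants, and quoting is omitted:

package main

import "fmt"

// emptyRowInsert builds an INSERT for a row where every column takes its
// default value, following the dialect branches used by GenInsertSQL.
func emptyRowInsert(dbType, table, autoIncrCol string) string {
	switch dbType {
	case "mysql": // MySQL allows an empty VALUES list
		return fmt.Sprintf("INSERT INTO %s VALUES ()", table)
	case "mssql": // SQL Server reports the generated key via OUTPUT
		return fmt.Sprintf("INSERT INTO %s OUTPUT Inserted.%s DEFAULT VALUES", table, autoIncrCol)
	case "postgres": // Postgres reports it via RETURNING
		return fmt.Sprintf("INSERT INTO %s DEFAULT VALUES RETURNING %s", table, autoIncrCol)
	default:
		return fmt.Sprintf("INSERT INTO %s DEFAULT VALUES", table)
	}
}

func main() {
	for _, db := range []string{"mysql", "mssql", "postgres", "sqlite3"} {
		fmt.Println(emptyRowInsert(db, "user", "id"))
	}
}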
+ +package statements + +import ( + "fmt" + "strings" + + "xorm.io/builder" + "xorm.io/xorm/schemas" +) + +func (statement *Statement) writeInsertOutput(buf *strings.Builder, table *schemas.Table) error { + if statement.dialect.URI().DBType == schemas.MSSQL && len(table.AutoIncrement) > 0 { + if _, err := buf.WriteString(" OUTPUT Inserted."); err != nil { + return err + } + if _, err := buf.WriteString(table.AutoIncrement); err != nil { + return err + } + } + return nil +} + +// GenInsertSQL generates insert beans SQL +func (statement *Statement) GenInsertSQL(colNames []string, args []interface{}) (string, []interface{}, error) { + var ( + buf = builder.NewWriter() + exprs = statement.ExprColumns + table = statement.RefTable + tableName = statement.TableName() + ) + + if _, err := buf.WriteString("INSERT INTO "); err != nil { + return "", nil, err + } + + if err := statement.dialect.Quoter().QuoteTo(buf.Builder, tableName); err != nil { + return "", nil, err + } + + if len(colNames) <= 0 { + if statement.dialect.URI().DBType == schemas.MYSQL { + if _, err := buf.WriteString(" VALUES ()"); err != nil { + return "", nil, err + } + } else { + if err := statement.writeInsertOutput(buf.Builder, table); err != nil { + return "", nil, err + } + if _, err := buf.WriteString(" DEFAULT VALUES"); err != nil { + return "", nil, err + } + } + } else { + if _, err := buf.WriteString(" ("); err != nil { + return "", nil, err + } + + if err := statement.dialect.Quoter().JoinWrite(buf.Builder, append(colNames, exprs.ColNames...), ","); err != nil { + return "", nil, err + } + + if _, err := buf.WriteString(")"); err != nil { + return "", nil, err + } + if err := statement.writeInsertOutput(buf.Builder, table); err != nil { + return "", nil, err + } + + if statement.Conds().IsValid() { + if _, err := buf.WriteString(" SELECT "); err != nil { + return "", nil, err + } + + if err := statement.WriteArgs(buf, args); err != nil { + return "", nil, err + } + + if len(exprs.Args) > 0 { + if _, err := buf.WriteString(","); err != nil { + return "", nil, err + } + } + if err := exprs.WriteArgs(buf); err != nil { + return "", nil, err + } + + if _, err := buf.WriteString(" FROM "); err != nil { + return "", nil, err + } + + if err := statement.dialect.Quoter().QuoteTo(buf.Builder, tableName); err != nil { + return "", nil, err + } + + if _, err := buf.WriteString(" WHERE "); err != nil { + return "", nil, err + } + + if err := statement.Conds().WriteTo(buf); err != nil { + return "", nil, err + } + } else { + if _, err := buf.WriteString(" VALUES ("); err != nil { + return "", nil, err + } + + if err := statement.WriteArgs(buf, args); err != nil { + return "", nil, err + } + + if len(exprs.Args) > 0 { + if _, err := buf.WriteString(","); err != nil { + return "", nil, err + } + } + + if err := exprs.WriteArgs(buf); err != nil { + return "", nil, err + } + + if _, err := buf.WriteString(")"); err != nil { + return "", nil, err + } + } + } + + if len(table.AutoIncrement) > 0 && statement.dialect.URI().DBType == schemas.POSTGRES { + if _, err := buf.WriteString(" RETURNING "); err != nil { + return "", nil, err + } + if err := statement.dialect.Quoter().QuoteTo(buf.Builder, table.AutoIncrement); err != nil { + return "", nil, err + } + } + + return buf.String(), buf.Args(), nil +} + +// GenInsertMapSQL generates insert map SQL +func (statement *Statement) GenInsertMapSQL(columns []string, args []interface{}) (string, []interface{}, error) { + var ( + buf = builder.NewWriter() + exprs = statement.ExprColumns + tableName 
= statement.TableName() + ) + + if _, err := buf.WriteString(fmt.Sprintf("INSERT INTO %s (", statement.quote(tableName))); err != nil { + return "", nil, err + } + + if err := statement.dialect.Quoter().JoinWrite(buf.Builder, append(columns, exprs.ColNames...), ","); err != nil { + return "", nil, err + } + + // if insert where + if statement.Conds().IsValid() { + if _, err := buf.WriteString(") SELECT "); err != nil { + return "", nil, err + } + + if err := statement.WriteArgs(buf, args); err != nil { + return "", nil, err + } + + if len(exprs.Args) > 0 { + if _, err := buf.WriteString(","); err != nil { + return "", nil, err + } + if err := exprs.WriteArgs(buf); err != nil { + return "", nil, err + } + } + + if _, err := buf.WriteString(fmt.Sprintf(" FROM %s WHERE ", statement.quote(tableName))); err != nil { + return "", nil, err + } + + if err := statement.Conds().WriteTo(buf); err != nil { + return "", nil, err + } + } else { + if _, err := buf.WriteString(") VALUES ("); err != nil { + return "", nil, err + } + if err := statement.WriteArgs(buf, args); err != nil { + return "", nil, err + } + + if len(exprs.Args) > 0 { + if _, err := buf.WriteString(","); err != nil { + return "", nil, err + } + if err := exprs.WriteArgs(buf); err != nil { + return "", nil, err + } + } + if _, err := buf.WriteString(")"); err != nil { + return "", nil, err + } + } + + return buf.String(), buf.Args(), nil +} diff --git a/vendor/xorm.io/xorm/internal/statements/pk.go b/vendor/xorm.io/xorm/internal/statements/pk.go new file mode 100644 index 0000000..59da89c --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/pk.go @@ -0,0 +1,98 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package statements + +import ( + "fmt" + "reflect" + + "xorm.io/builder" + "xorm.io/xorm/schemas" +) + +var ( + ptrPkType = reflect.TypeOf(&schemas.PK{}) + pkType = reflect.TypeOf(schemas.PK{}) + stringType = reflect.TypeOf("") + intType = reflect.TypeOf(int64(0)) + uintType = reflect.TypeOf(uint64(0)) +) + +// ErrIDConditionWithNoTable represents an error there is no reference table with an ID condition +type ErrIDConditionWithNoTable struct { + ID schemas.PK +} + +func (err ErrIDConditionWithNoTable) Error() string { + return fmt.Sprintf("ID condition %#v need reference table", err.ID) +} + +// IsIDConditionWithNoTableErr return true if the err is ErrIDConditionWithNoTable +func IsIDConditionWithNoTableErr(err error) bool { + _, ok := err.(ErrIDConditionWithNoTable) + return ok +} + +// ID generate "where id = ? " statement or for composite key "where key1 = ? and key2 = ?" 
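From the caller's side the distinction looks like this; a usage sketch in which the UserRole type and the key values (1, 2) are assumptions:

package example

import (
	"xorm.io/xorm"
	"xorm.io/xorm/schemas"
)

// UserRole has a composite primary key (user_id, role_id).
type UserRole struct {
	UserId int64 `xorm:"pk"`
	RoleId int64 `xorm:"pk"`
}

func load(engine *xorm.Engine) (bool, *UserRole, error) {
	var ur UserRole
	// A single-column key takes a plain value: engine.ID(1).
	// A composite key takes a schemas.PK in primary-key order,
	// producing "WHERE user_id = ? AND role_id = ?".
	has, err := engine.ID(schemas.PK{1, 2}).Get(&ur)
	return has, &ur, err
}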
+func (statement *Statement) ID(id interface{}) *Statement { + switch t := id.(type) { + case *schemas.PK: + statement.idParam = *t + case schemas.PK: + statement.idParam = t + case string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + statement.idParam = schemas.PK{id} + default: + idValue := reflect.ValueOf(id) + idType := idValue.Type() + + switch idType.Kind() { + case reflect.String: + statement.idParam = schemas.PK{idValue.Convert(stringType).Interface()} + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + statement.idParam = schemas.PK{idValue.Convert(intType).Interface()} + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + statement.idParam = schemas.PK{idValue.Convert(uintType).Interface()} + case reflect.Slice: + if idType.ConvertibleTo(pkType) { + statement.idParam = idValue.Convert(pkType).Interface().(schemas.PK) + } + case reflect.Ptr: + if idType.ConvertibleTo(ptrPkType) { + statement.idParam = idValue.Convert(ptrPkType).Elem().Interface().(schemas.PK) + } + } + } + + if statement.idParam == nil { + statement.LastError = fmt.Errorf("ID param %#v is not supported", id) + } + + return statement +} + +// ProcessIDParam handles the process of id condition +func (statement *Statement) ProcessIDParam() error { + if statement.idParam == nil { + return nil + } + + if statement.RefTable == nil { + return ErrIDConditionWithNoTable{statement.idParam} + } + + if len(statement.RefTable.PrimaryKeys) != len(statement.idParam) { + return fmt.Errorf("ID condition is error, expect %d primarykeys, there are %d", + len(statement.RefTable.PrimaryKeys), + len(statement.idParam), + ) + } + + for i, col := range statement.RefTable.PKColumns() { + var colName = statement.colName(col, statement.TableName()) + statement.cond = statement.cond.And(builder.Eq{colName: statement.idParam[i]}) + } + return nil +} diff --git a/vendor/xorm.io/xorm/internal/statements/query.go b/vendor/xorm.io/xorm/internal/statements/query.go new file mode 100644 index 0000000..ab3021b --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/query.go @@ -0,0 +1,441 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package statements + +import ( + "errors" + "fmt" + "reflect" + "strings" + + "xorm.io/builder" + "xorm.io/xorm/schemas" +) + +func (statement *Statement) GenQuerySQL(sqlOrArgs ...interface{}) (string, []interface{}, error) { + if len(sqlOrArgs) > 0 { + return statement.ConvertSQLOrArgs(sqlOrArgs...) 
+ } + + if statement.RawSQL != "" { + return statement.GenRawSQL(), statement.RawParams, nil + } + + if len(statement.TableName()) <= 0 { + return "", nil, ErrTableNotFound + } + + var columnStr = statement.ColumnStr() + if len(statement.SelectStr) > 0 { + columnStr = statement.SelectStr + } else { + if statement.JoinStr == "" { + if columnStr == "" { + if statement.GroupByStr != "" { + columnStr = statement.quoteColumnStr(statement.GroupByStr) + } else { + columnStr = statement.genColumnStr() + } + } + } else { + if columnStr == "" { + if statement.GroupByStr != "" { + columnStr = statement.quoteColumnStr(statement.GroupByStr) + } else { + columnStr = "*" + } + } + } + if columnStr == "" { + columnStr = "*" + } + } + + if err := statement.ProcessIDParam(); err != nil { + return "", nil, err + } + + sqlStr, condArgs, err := statement.genSelectSQL(columnStr, true, true) + if err != nil { + return "", nil, err + } + args := append(statement.joinArgs, condArgs...) + + // for mssql and use limit + qs := strings.Count(sqlStr, "?") + if len(args)*2 == qs { + args = append(args, args...) + } + + return sqlStr, args, nil +} + +func (statement *Statement) GenSumSQL(bean interface{}, columns ...string) (string, []interface{}, error) { + if statement.RawSQL != "" { + return statement.GenRawSQL(), statement.RawParams, nil + } + + statement.SetRefBean(bean) + + var sumStrs = make([]string, 0, len(columns)) + for _, colName := range columns { + if !strings.Contains(colName, " ") && !strings.Contains(colName, "(") { + colName = statement.quote(colName) + } else { + colName = statement.ReplaceQuote(colName) + } + sumStrs = append(sumStrs, fmt.Sprintf("COALESCE(sum(%s),0)", colName)) + } + sumSelect := strings.Join(sumStrs, ", ") + + if err := statement.mergeConds(bean); err != nil { + return "", nil, err + } + + sqlStr, condArgs, err := statement.genSelectSQL(sumSelect, true, true) + if err != nil { + return "", nil, err + } + + return sqlStr, append(statement.joinArgs, condArgs...), nil +} + +func (statement *Statement) GenGetSQL(bean interface{}) (string, []interface{}, error) { + v := rValue(bean) + isStruct := v.Kind() == reflect.Struct + if isStruct { + statement.SetRefBean(bean) + } + + var columnStr = statement.ColumnStr() + if len(statement.SelectStr) > 0 { + columnStr = statement.SelectStr + } else { + // TODO: always generate column names, not use * even if join + if len(statement.JoinStr) == 0 { + if len(columnStr) == 0 { + if len(statement.GroupByStr) > 0 { + columnStr = statement.quoteColumnStr(statement.GroupByStr) + } else { + columnStr = statement.genColumnStr() + } + } + } else { + if len(columnStr) == 0 { + if len(statement.GroupByStr) > 0 { + columnStr = statement.quoteColumnStr(statement.GroupByStr) + } + } + } + } + + if len(columnStr) == 0 { + columnStr = "*" + } + + if isStruct { + if err := statement.mergeConds(bean); err != nil { + return "", nil, err + } + } else { + if err := statement.ProcessIDParam(); err != nil { + return "", nil, err + } + } + + sqlStr, condArgs, err := statement.genSelectSQL(columnStr, true, true) + if err != nil { + return "", nil, err + } + + return sqlStr, append(statement.joinArgs, condArgs...), nil +} + +// GenCountSQL generates the SQL for counting +func (statement *Statement) GenCountSQL(beans ...interface{}) (string, []interface{}, error) { + if statement.RawSQL != "" { + return statement.GenRawSQL(), statement.RawParams, nil + } + + var condArgs []interface{} + var err error + if len(beans) > 0 { + statement.SetRefBean(beans[0]) + if err := 
statement.mergeConds(beans[0]); err != nil { + return "", nil, err + } + } + + var selectSQL = statement.SelectStr + if len(selectSQL) <= 0 { + if statement.IsDistinct { + selectSQL = fmt.Sprintf("count(DISTINCT %s)", statement.ColumnStr()) + } else if statement.ColumnStr() != "" { + selectSQL = fmt.Sprintf("count(%s)", statement.ColumnStr()) + } else { + selectSQL = "count(*)" + } + } + sqlStr, condArgs, err := statement.genSelectSQL(selectSQL, false, false) + if err != nil { + return "", nil, err + } + + return sqlStr, append(statement.joinArgs, condArgs...), nil +} + +func (statement *Statement) genSelectSQL(columnStr string, needLimit, needOrderBy bool) (string, []interface{}, error) { + var ( + distinct string + dialect = statement.dialect + quote = statement.quote + fromStr = " FROM " + top, mssqlCondi, whereStr string + ) + if statement.IsDistinct && !strings.HasPrefix(columnStr, "count") { + distinct = "DISTINCT " + } + + condSQL, condArgs, err := statement.GenCondSQL(statement.cond) + if err != nil { + return "", nil, err + } + if len(condSQL) > 0 { + whereStr = " WHERE " + condSQL + } + + if dialect.URI().DBType == schemas.MSSQL && strings.Contains(statement.TableName(), "..") { + fromStr += statement.TableName() + } else { + fromStr += quote(statement.TableName()) + } + + if statement.TableAlias != "" { + if dialect.URI().DBType == schemas.ORACLE { + fromStr += " " + quote(statement.TableAlias) + } else { + fromStr += " AS " + quote(statement.TableAlias) + } + } + if statement.JoinStr != "" { + fromStr = fmt.Sprintf("%v %v", fromStr, statement.JoinStr) + } + + pLimitN := statement.LimitN + if dialect.URI().DBType == schemas.MSSQL { + if pLimitN != nil { + LimitNValue := *pLimitN + top = fmt.Sprintf("TOP %d ", LimitNValue) + } + if statement.Start > 0 { + var column string + if len(statement.RefTable.PKColumns()) == 0 { + for _, index := range statement.RefTable.Indexes { + if len(index.Cols) == 1 { + column = index.Cols[0] + break + } + } + if len(column) == 0 { + column = statement.RefTable.ColumnsSeq()[0] + } + } else { + column = statement.RefTable.PKColumns()[0].Name + } + if statement.needTableName() { + if len(statement.TableAlias) > 0 { + column = statement.TableAlias + "." + column + } else { + column = statement.TableName() + "." 
+ column + } + } + + var orderStr string + if needOrderBy && len(statement.OrderStr) > 0 { + orderStr = " ORDER BY " + statement.OrderStr + } + + var groupStr string + if len(statement.GroupByStr) > 0 { + groupStr = " GROUP BY " + statement.GroupByStr + } + mssqlCondi = fmt.Sprintf("(%s NOT IN (SELECT TOP %d %s%s%s%s%s))", + column, statement.Start, column, fromStr, whereStr, orderStr, groupStr) + } + } + + var buf strings.Builder + fmt.Fprintf(&buf, "SELECT %v%v%v%v%v", distinct, top, columnStr, fromStr, whereStr) + if len(mssqlCondi) > 0 { + if len(whereStr) > 0 { + fmt.Fprint(&buf, " AND ", mssqlCondi) + } else { + fmt.Fprint(&buf, " WHERE ", mssqlCondi) + } + } + + if statement.GroupByStr != "" { + fmt.Fprint(&buf, " GROUP BY ", statement.GroupByStr) + } + if statement.HavingStr != "" { + fmt.Fprint(&buf, " ", statement.HavingStr) + } + if needOrderBy && statement.OrderStr != "" { + fmt.Fprint(&buf, " ORDER BY ", statement.OrderStr) + } + if needLimit { + if dialect.URI().DBType != schemas.MSSQL && dialect.URI().DBType != schemas.ORACLE { + if statement.Start > 0 { + if pLimitN != nil { + fmt.Fprintf(&buf, " LIMIT %v OFFSET %v", *pLimitN, statement.Start) + } else { + fmt.Fprintf(&buf, "LIMIT 0 OFFSET %v", statement.Start) + } + } else if pLimitN != nil { + fmt.Fprint(&buf, " LIMIT ", *pLimitN) + } + } else if dialect.URI().DBType == schemas.ORACLE { + if statement.Start != 0 || pLimitN != nil { + oldString := buf.String() + buf.Reset() + rawColStr := columnStr + if rawColStr == "*" { + rawColStr = "at.*" + } + fmt.Fprintf(&buf, "SELECT %v FROM (SELECT %v,ROWNUM RN FROM (%v) at WHERE ROWNUM <= %d) aat WHERE RN > %d", + columnStr, rawColStr, oldString, statement.Start+*pLimitN, statement.Start) + } + } + } + if statement.IsForUpdate { + return dialect.ForUpdateSQL(buf.String()), condArgs, nil + } + + return buf.String(), condArgs, nil +} + +func (statement *Statement) GenExistSQL(bean ...interface{}) (string, []interface{}, error) { + if statement.RawSQL != "" { + return statement.GenRawSQL(), statement.RawParams, nil + } + + var sqlStr string + var args []interface{} + var joinStr string + var err error + if len(bean) == 0 { + tableName := statement.TableName() + if len(tableName) <= 0 { + return "", nil, ErrTableNotFound + } + + tableName = statement.quote(tableName) + if len(statement.JoinStr) > 0 { + joinStr = statement.JoinStr + } + + if statement.Conds().IsValid() { + condSQL, condArgs, err := statement.GenCondSQL(statement.Conds()) + if err != nil { + return "", nil, err + } + + if statement.dialect.URI().DBType == schemas.MSSQL { + sqlStr = fmt.Sprintf("SELECT TOP 1 * FROM %s %s WHERE %s", tableName, joinStr, condSQL) + } else if statement.dialect.URI().DBType == schemas.ORACLE { + sqlStr = fmt.Sprintf("SELECT * FROM %s WHERE (%s) %s AND ROWNUM=1", tableName, joinStr, condSQL) + } else { + sqlStr = fmt.Sprintf("SELECT * FROM %s %s WHERE %s LIMIT 1", tableName, joinStr, condSQL) + } + args = condArgs + } else { + if statement.dialect.URI().DBType == schemas.MSSQL { + sqlStr = fmt.Sprintf("SELECT TOP 1 * FROM %s %s", tableName, joinStr) + } else if statement.dialect.URI().DBType == schemas.ORACLE { + sqlStr = fmt.Sprintf("SELECT * FROM %s %s WHERE ROWNUM=1", tableName, joinStr) + } else { + sqlStr = fmt.Sprintf("SELECT * FROM %s %s LIMIT 1", tableName, joinStr) + } + args = []interface{}{} + } + } else { + beanValue := reflect.ValueOf(bean[0]) + if beanValue.Kind() != reflect.Ptr { + return "", nil, errors.New("needs a pointer") + } + + if beanValue.Elem().Kind() == reflect.Struct 
{ + if err := statement.SetRefBean(bean[0]); err != nil { + return "", nil, err + } + } + + if len(statement.TableName()) <= 0 { + return "", nil, ErrTableNotFound + } + statement.Limit(1) + sqlStr, args, err = statement.GenGetSQL(bean[0]) + if err != nil { + return "", nil, err + } + } + + return sqlStr, args, nil +} + +func (statement *Statement) GenFindSQL(autoCond builder.Cond) (string, []interface{}, error) { + if statement.RawSQL != "" { + return statement.GenRawSQL(), statement.RawParams, nil + } + + var sqlStr string + var args []interface{} + var err error + + if len(statement.TableName()) <= 0 { + return "", nil, ErrTableNotFound + } + + var columnStr = statement.ColumnStr() + if len(statement.SelectStr) > 0 { + columnStr = statement.SelectStr + } else { + if statement.JoinStr == "" { + if columnStr == "" { + if statement.GroupByStr != "" { + columnStr = statement.quoteColumnStr(statement.GroupByStr) + } else { + columnStr = statement.genColumnStr() + } + } + } else { + if columnStr == "" { + if statement.GroupByStr != "" { + columnStr = statement.quoteColumnStr(statement.GroupByStr) + } else { + columnStr = "*" + } + } + } + if columnStr == "" { + columnStr = "*" + } + } + + statement.cond = statement.cond.And(autoCond) + + sqlStr, condArgs, err := statement.genSelectSQL(columnStr, true, true) + if err != nil { + return "", nil, err + } + args = append(statement.joinArgs, condArgs...) + // for mssql and use limit + qs := strings.Count(sqlStr, "?") + if len(args)*2 == qs { + args = append(args, args...) + } + + return sqlStr, args, nil +} diff --git a/vendor/xorm.io/xorm/internal/statements/statement.go b/vendor/xorm.io/xorm/internal/statements/statement.go new file mode 100644 index 0000000..ed7bdae --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/statement.go @@ -0,0 +1,995 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
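GenExistSQL above needs only a single row to decide existence, and each dialect caps the result set differently: TOP 1 on SQL Server, a ROWNUM filter on Oracle, LIMIT 1 everywhere else. A standalone sketch of that selection; the dbType strings stand in for the schemas.DBType constants, and table and cond are assumed to be already-quoted SQL fragments:

package main

import "fmt"

// existSQL mirrors the per-dialect single-row cap used by GenExistSQL.
func existSQL(dbType, table, cond string) string {
	switch dbType {
	case "mssql": // no LIMIT clause; TOP 1 caps the rows
		return fmt.Sprintf("SELECT TOP 1 * FROM %s WHERE %s", table, cond)
	case "oracle": // pre-12c Oracle filters on the ROWNUM pseudo-column
		return fmt.Sprintf("SELECT * FROM %s WHERE (%s) AND ROWNUM=1", table, cond)
	default: // MySQL, Postgres, SQLite
		return fmt.Sprintf("SELECT * FROM %s WHERE %s LIMIT 1", table, cond)
	}
}

func main() {
	for _, db := range []string{"mssql", "oracle", "mysql"} {
		fmt.Println(existSQL(db, "user", "name = ?"))
	}
}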
+ +package statements + +import ( + "database/sql/driver" + "errors" + "fmt" + "reflect" + "strings" + "time" + + "xorm.io/builder" + "xorm.io/xorm/contexts" + "xorm.io/xorm/convert" + "xorm.io/xorm/dialects" + "xorm.io/xorm/internal/json" + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" + "xorm.io/xorm/tags" +) + +var ( + // ErrConditionType condition type unsupported + ErrConditionType = errors.New("Unsupported condition type") + // ErrUnSupportedSQLType parameter of SQL is not supported + ErrUnSupportedSQLType = errors.New("Unsupported sql type") + // ErrUnSupportedType unsupported error + ErrUnSupportedType = errors.New("Unsupported type error") + // ErrTableNotFound table not found error + ErrTableNotFound = errors.New("Table not found") +) + +// Statement save all the sql info for executing SQL +type Statement struct { + RefTable *schemas.Table + dialect dialects.Dialect + defaultTimeZone *time.Location + tagParser *tags.Parser + Start int + LimitN *int + idParam schemas.PK + OrderStr string + JoinStr string + joinArgs []interface{} + GroupByStr string + HavingStr string + SelectStr string + useAllCols bool + AltTableName string + tableName string + RawSQL string + RawParams []interface{} + UseCascade bool + UseAutoJoin bool + StoreEngine string + Charset string + UseCache bool + UseAutoTime bool + NoAutoCondition bool + IsDistinct bool + IsForUpdate bool + TableAlias string + allUseBool bool + CheckVersion bool + unscoped bool + ColumnMap columnMap + OmitColumnMap columnMap + MustColumnMap map[string]bool + NullableMap map[string]bool + IncrColumns exprParams + DecrColumns exprParams + ExprColumns exprParams + cond builder.Cond + BufferSize int + Context contexts.ContextCache + LastError error +} + +// NewStatement creates a new statement +func NewStatement(dialect dialects.Dialect, tagParser *tags.Parser, defaultTimeZone *time.Location) *Statement { + statement := &Statement{ + dialect: dialect, + tagParser: tagParser, + defaultTimeZone: defaultTimeZone, + } + statement.Reset() + return statement +} + +func (statement *Statement) SetTableName(tableName string) { + statement.tableName = tableName +} + +func (statement *Statement) omitStr() string { + return statement.dialect.Quoter().Join(statement.OmitColumnMap, " ,") +} + +// GenRawSQL generates correct raw sql +func (statement *Statement) GenRawSQL() string { + return statement.ReplaceQuote(statement.RawSQL) +} + +func (statement *Statement) GenCondSQL(condOrBuilder interface{}) (string, []interface{}, error) { + condSQL, condArgs, err := builder.ToSQL(condOrBuilder) + if err != nil { + return "", nil, err + } + return statement.ReplaceQuote(condSQL), condArgs, nil +} + +func (statement *Statement) ReplaceQuote(sql string) string { + if sql == "" || statement.dialect.URI().DBType == schemas.MYSQL || + statement.dialect.URI().DBType == schemas.SQLITE { + return sql + } + return statement.dialect.Quoter().Replace(sql) +} + +func (statement *Statement) SetContextCache(ctxCache contexts.ContextCache) { + statement.Context = ctxCache +} + +// Init reset all the statement's fields +func (statement *Statement) Reset() { + statement.RefTable = nil + statement.Start = 0 + statement.LimitN = nil + statement.OrderStr = "" + statement.UseCascade = true + statement.JoinStr = "" + statement.joinArgs = make([]interface{}, 0) + statement.GroupByStr = "" + statement.HavingStr = "" + statement.ColumnMap = columnMap{} + statement.OmitColumnMap = columnMap{} + statement.AltTableName = "" + statement.tableName = "" + statement.idParam = nil + 
statement.RawSQL = "" + statement.RawParams = make([]interface{}, 0) + statement.UseCache = true + statement.UseAutoTime = true + statement.NoAutoCondition = false + statement.IsDistinct = false + statement.IsForUpdate = false + statement.TableAlias = "" + statement.SelectStr = "" + statement.allUseBool = false + statement.useAllCols = false + statement.MustColumnMap = make(map[string]bool) + statement.NullableMap = make(map[string]bool) + statement.CheckVersion = true + statement.unscoped = false + statement.IncrColumns = exprParams{} + statement.DecrColumns = exprParams{} + statement.ExprColumns = exprParams{} + statement.cond = builder.NewCond() + statement.BufferSize = 0 + statement.Context = nil + statement.LastError = nil +} + +// NoAutoCondition if you do not want convert bean's field as query condition, then use this function +func (statement *Statement) SetNoAutoCondition(no ...bool) *Statement { + statement.NoAutoCondition = true + if len(no) > 0 { + statement.NoAutoCondition = no[0] + } + return statement +} + +// Alias set the table alias +func (statement *Statement) Alias(alias string) *Statement { + statement.TableAlias = alias + return statement +} + +// SQL adds raw sql statement +func (statement *Statement) SQL(query interface{}, args ...interface{}) *Statement { + switch query.(type) { + case (*builder.Builder): + var err error + statement.RawSQL, statement.RawParams, err = query.(*builder.Builder).ToSQL() + if err != nil { + statement.LastError = err + } + case string: + statement.RawSQL = query.(string) + statement.RawParams = args + default: + statement.LastError = ErrUnSupportedSQLType + } + + return statement +} + +// Where add Where statement +func (statement *Statement) Where(query interface{}, args ...interface{}) *Statement { + return statement.And(query, args...) +} + +func (statement *Statement) quote(s string) string { + return statement.dialect.Quoter().Quote(s) +} + +// And add Where & and statement +func (statement *Statement) And(query interface{}, args ...interface{}) *Statement { + switch query.(type) { + case string: + cond := builder.Expr(query.(string), args...) + statement.cond = statement.cond.And(cond) + case map[string]interface{}: + queryMap := query.(map[string]interface{}) + newMap := make(map[string]interface{}) + for k, v := range queryMap { + newMap[statement.quote(k)] = v + } + statement.cond = statement.cond.And(builder.Eq(newMap)) + case builder.Cond: + cond := query.(builder.Cond) + statement.cond = statement.cond.And(cond) + for _, v := range args { + if vv, ok := v.(builder.Cond); ok { + statement.cond = statement.cond.And(vv) + } + } + default: + statement.LastError = ErrConditionType + } + + return statement +} + +// Or add Where & Or statement +func (statement *Statement) Or(query interface{}, args ...interface{}) *Statement { + switch query.(type) { + case string: + cond := builder.Expr(query.(string), args...) + statement.cond = statement.cond.Or(cond) + case map[string]interface{}: + cond := builder.Eq(query.(map[string]interface{})) + statement.cond = statement.cond.Or(cond) + case builder.Cond: + cond := query.(builder.Cond) + statement.cond = statement.cond.Or(cond) + for _, v := range args { + if vv, ok := v.(builder.Cond); ok { + statement.cond = statement.cond.Or(vv) + } + } + default: + // TODO: not support condition type + } + return statement +} + +// In generate "Where column IN (?) 
" statement +func (statement *Statement) In(column string, args ...interface{}) *Statement { + in := builder.In(statement.quote(column), args...) + statement.cond = statement.cond.And(in) + return statement +} + +// NotIn generate "Where column NOT IN (?) " statement +func (statement *Statement) NotIn(column string, args ...interface{}) *Statement { + notIn := builder.NotIn(statement.quote(column), args...) + statement.cond = statement.cond.And(notIn) + return statement +} + +func (statement *Statement) SetRefValue(v reflect.Value) error { + var err error + statement.RefTable, err = statement.tagParser.ParseWithCache(reflect.Indirect(v)) + if err != nil { + return err + } + statement.tableName = dialects.FullTableName(statement.dialect, statement.tagParser.GetTableMapper(), v, true) + return nil +} + +func rValue(bean interface{}) reflect.Value { + return reflect.Indirect(reflect.ValueOf(bean)) +} + +func (statement *Statement) SetRefBean(bean interface{}) error { + var err error + statement.RefTable, err = statement.tagParser.ParseWithCache(rValue(bean)) + if err != nil { + return err + } + statement.tableName = dialects.FullTableName(statement.dialect, statement.tagParser.GetTableMapper(), bean, true) + return nil +} + +func (statement *Statement) needTableName() bool { + return len(statement.JoinStr) > 0 +} + +func (statement *Statement) colName(col *schemas.Column, tableName string) string { + if statement.needTableName() { + var nm = tableName + if len(statement.TableAlias) > 0 { + nm = statement.TableAlias + } + return statement.quote(nm) + "." + statement.quote(col.Name) + } + return statement.quote(col.Name) +} + +// TableName return current tableName +func (statement *Statement) TableName() string { + if statement.AltTableName != "" { + return statement.AltTableName + } + + return statement.tableName +} + +// Incr Generate "Update ... Set column = column + arg" statement +func (statement *Statement) Incr(column string, arg ...interface{}) *Statement { + if len(arg) > 0 { + statement.IncrColumns.addParam(column, arg[0]) + } else { + statement.IncrColumns.addParam(column, 1) + } + return statement +} + +// Decr Generate "Update ... Set column = column - arg" statement +func (statement *Statement) Decr(column string, arg ...interface{}) *Statement { + if len(arg) > 0 { + statement.DecrColumns.addParam(column, arg[0]) + } else { + statement.DecrColumns.addParam(column, 1) + } + return statement +} + +// SetExpr Generate "Update ... Set column = {expression}" statement +func (statement *Statement) SetExpr(column string, expression interface{}) *Statement { + if e, ok := expression.(string); ok { + statement.ExprColumns.addParam(column, statement.dialect.Quoter().Replace(e)) + } else { + statement.ExprColumns.addParam(column, expression) + } + return statement +} + +// Distinct generates "DISTINCT col1, col2 " statement +func (statement *Statement) Distinct(columns ...string) *Statement { + statement.IsDistinct = true + statement.Cols(columns...) + return statement +} + +// ForUpdate generates "SELECT ... 
FOR UPDATE" statement +func (statement *Statement) ForUpdate() *Statement { + statement.IsForUpdate = true + return statement +} + +// Select replace select +func (statement *Statement) Select(str string) *Statement { + statement.SelectStr = statement.ReplaceQuote(str) + return statement +} + +func col2NewCols(columns ...string) []string { + newColumns := make([]string, 0, len(columns)) + for _, col := range columns { + col = strings.Replace(col, "`", "", -1) + col = strings.Replace(col, `"`, "", -1) + ccols := strings.Split(col, ",") + for _, c := range ccols { + newColumns = append(newColumns, strings.TrimSpace(c)) + } + } + return newColumns +} + +// Cols generate "col1, col2" statement +func (statement *Statement) Cols(columns ...string) *Statement { + cols := col2NewCols(columns...) + for _, nc := range cols { + statement.ColumnMap.Add(nc) + } + return statement +} + +func (statement *Statement) ColumnStr() string { + return statement.dialect.Quoter().Join(statement.ColumnMap, ", ") +} + +// AllCols update use only: update all columns +func (statement *Statement) AllCols() *Statement { + statement.useAllCols = true + return statement +} + +// MustCols update use only: must update columns +func (statement *Statement) MustCols(columns ...string) *Statement { + newColumns := col2NewCols(columns...) + for _, nc := range newColumns { + statement.MustColumnMap[strings.ToLower(nc)] = true + } + return statement +} + +// UseBool indicates that use bool fields as update contents and query contiditions +func (statement *Statement) UseBool(columns ...string) *Statement { + if len(columns) > 0 { + statement.MustCols(columns...) + } else { + statement.allUseBool = true + } + return statement +} + +// Omit do not use the columns +func (statement *Statement) Omit(columns ...string) { + newColumns := col2NewCols(columns...) + for _, nc := range newColumns { + statement.OmitColumnMap = append(statement.OmitColumnMap, nc) + } +} + +// Nullable Update use only: update columns to null when value is nullable and zero-value +func (statement *Statement) Nullable(columns ...string) { + newColumns := col2NewCols(columns...) + for _, nc := range newColumns { + statement.NullableMap[strings.ToLower(nc)] = true + } +} + +// Top generate LIMIT limit statement +func (statement *Statement) Top(limit int) *Statement { + statement.Limit(limit) + return statement +} + +// Limit generate LIMIT start, limit statement +func (statement *Statement) Limit(limit int, start ...int) *Statement { + statement.LimitN = &limit + if len(start) > 0 { + statement.Start = start[0] + } + return statement +} + +// OrderBy generate "Order By order" statement +func (statement *Statement) OrderBy(order string) *Statement { + if len(statement.OrderStr) > 0 { + statement.OrderStr += ", " + } + statement.OrderStr += statement.ReplaceQuote(order) + return statement +} + +// Desc generate `ORDER BY xx DESC` +func (statement *Statement) Desc(colNames ...string) *Statement { + var buf strings.Builder + if len(statement.OrderStr) > 0 { + fmt.Fprint(&buf, statement.OrderStr, ", ") + } + for i, col := range colNames { + if i > 0 { + fmt.Fprint(&buf, ", ") + } + statement.dialect.Quoter().QuoteTo(&buf, col) + fmt.Fprint(&buf, " DESC") + } + statement.OrderStr = buf.String() + return statement +} + +// Asc provide asc order by query condition, the input parameters are columns. 
+func (statement *Statement) Asc(colNames ...string) *Statement {
+	var buf strings.Builder
+	if len(statement.OrderStr) > 0 {
+		fmt.Fprint(&buf, statement.OrderStr, ", ")
+	}
+	for i, col := range colNames {
+		if i > 0 {
+			fmt.Fprint(&buf, ", ")
+		}
+		statement.dialect.Quoter().QuoteTo(&buf, col)
+		fmt.Fprint(&buf, " ASC")
+	}
+	statement.OrderStr = buf.String()
+	return statement
+}
+
+func (statement *Statement) Conds() builder.Cond {
+	return statement.cond
+}
+
+// SetTable temporarily sets the table name; the parameter can be a string or a pointer to a struct
+func (statement *Statement) SetTable(tableNameOrBean interface{}) error {
+	v := rValue(tableNameOrBean)
+	t := v.Type()
+	if t.Kind() == reflect.Struct {
+		var err error
+		statement.RefTable, err = statement.tagParser.ParseWithCache(v)
+		if err != nil {
+			return err
+		}
+	}
+
+	statement.AltTableName = dialects.FullTableName(statement.dialect, statement.tagParser.GetTableMapper(), tableNameOrBean, true)
+	return nil
+}
+
+// Join adds a join clause. The joinOP should be one of INNER, LEFT OUTER, CROSS etc. - it will be prepended to JOIN
+func (statement *Statement) Join(joinOP string, tablename interface{}, condition string, args ...interface{}) *Statement {
+	var buf strings.Builder
+	if len(statement.JoinStr) > 0 {
+		fmt.Fprintf(&buf, "%v %v JOIN ", statement.JoinStr, joinOP)
+	} else {
+		fmt.Fprintf(&buf, "%v JOIN ", joinOP)
+	}
+
+	switch tp := tablename.(type) {
+	case builder.Builder:
+		subSQL, subQueryArgs, err := tp.ToSQL()
+		if err != nil {
+			statement.LastError = err
+			return statement
+		}
+
+		fields := strings.Split(tp.TableName(), ".")
+		aliasName := statement.dialect.Quoter().Trim(fields[len(fields)-1])
+		aliasName = schemas.CommonQuoter.Trim(aliasName)
+
+		fmt.Fprintf(&buf, "(%s) %s ON %v", statement.ReplaceQuote(subSQL), aliasName, statement.ReplaceQuote(condition))
+		statement.joinArgs = append(statement.joinArgs, subQueryArgs...)
+	case *builder.Builder:
+		subSQL, subQueryArgs, err := tp.ToSQL()
+		if err != nil {
+			statement.LastError = err
+			return statement
+		}
+
+		fields := strings.Split(tp.TableName(), ".")
+		aliasName := statement.dialect.Quoter().Trim(fields[len(fields)-1])
+		aliasName = schemas.CommonQuoter.Trim(aliasName)
+
+		fmt.Fprintf(&buf, "(%s) %s ON %v", statement.ReplaceQuote(subSQL), aliasName, statement.ReplaceQuote(condition))
+		statement.joinArgs = append(statement.joinArgs, subQueryArgs...)
+	default:
+		tbName := dialects.FullTableName(statement.dialect, statement.tagParser.GetTableMapper(), tablename, true)
+		if !utils.IsSubQuery(tbName) {
+			var buf strings.Builder
+			statement.dialect.Quoter().QuoteTo(&buf, tbName)
+			tbName = buf.String()
+		}
+		fmt.Fprintf(&buf, "%s ON %v", tbName, statement.ReplaceQuote(condition))
+	}
+
+	statement.JoinStr = buf.String()
+	statement.joinArgs = append(statement.joinArgs, args...)
+ return statement +} + +// tbName get some table's table name +func (statement *Statement) tbNameNoSchema(table *schemas.Table) string { + if len(statement.AltTableName) > 0 { + return statement.AltTableName + } + + return table.Name +} + +// GroupBy generate "Group By keys" statement +func (statement *Statement) GroupBy(keys string) *Statement { + statement.GroupByStr = statement.ReplaceQuote(keys) + return statement +} + +// Having generate "Having conditions" statement +func (statement *Statement) Having(conditions string) *Statement { + statement.HavingStr = fmt.Sprintf("HAVING %v", statement.ReplaceQuote(conditions)) + return statement +} + +// Unscoped always disable struct tag "deleted" +func (statement *Statement) SetUnscoped() *Statement { + statement.unscoped = true + return statement +} + +func (statement *Statement) GetUnscoped() bool { + return statement.unscoped +} + +func (statement *Statement) genColumnStr() string { + if statement.RefTable == nil { + return "" + } + + var buf strings.Builder + columns := statement.RefTable.Columns() + + for _, col := range columns { + if statement.OmitColumnMap.Contain(col.Name) { + continue + } + + if len(statement.ColumnMap) > 0 && !statement.ColumnMap.Contain(col.Name) { + continue + } + + if col.MapType == schemas.ONLYTODB { + continue + } + + if buf.Len() != 0 { + buf.WriteString(", ") + } + + if statement.JoinStr != "" { + if statement.TableAlias != "" { + buf.WriteString(statement.TableAlias) + } else { + buf.WriteString(statement.TableName()) + } + + buf.WriteString(".") + } + + statement.dialect.Quoter().QuoteTo(&buf, col.Name) + } + + return buf.String() +} + +func (statement *Statement) GenCreateTableSQL() []string { + statement.RefTable.StoreEngine = statement.StoreEngine + statement.RefTable.Charset = statement.Charset + s, _ := statement.dialect.CreateTableSQL(statement.RefTable, statement.TableName()) + return s +} + +func (statement *Statement) GenIndexSQL() []string { + var sqls []string + tbName := statement.TableName() + for _, index := range statement.RefTable.Indexes { + if index.Type == schemas.IndexType { + sql := statement.dialect.CreateIndexSQL(tbName, index) + sqls = append(sqls, sql) + } + } + return sqls +} + +func uniqueName(tableName, uqeName string) string { + return fmt.Sprintf("UQE_%v_%v", tableName, uqeName) +} + +func (statement *Statement) GenUniqueSQL() []string { + var sqls []string + tbName := statement.TableName() + for _, index := range statement.RefTable.Indexes { + if index.Type == schemas.UniqueType { + sql := statement.dialect.CreateIndexSQL(tbName, index) + sqls = append(sqls, sql) + } + } + return sqls +} + +func (statement *Statement) GenDelIndexSQL() []string { + var sqls []string + tbName := statement.TableName() + idx := strings.Index(tbName, ".") + if idx > -1 { + tbName = tbName[idx+1:] + } + for _, index := range statement.RefTable.Indexes { + sqls = append(sqls, statement.dialect.DropIndexSQL(tbName, index)) + } + return sqls +} + +func (statement *Statement) buildConds2(table *schemas.Table, bean interface{}, + includeVersion bool, includeUpdated bool, includeNil bool, + includeAutoIncr bool, allUseBool bool, useAllCols bool, unscoped bool, + mustColumnMap map[string]bool, tableName, aliasName string, addedTableName bool) (builder.Cond, error) { + var conds []builder.Cond + for _, col := range table.Columns() { + if !includeVersion && col.IsVersion { + continue + } + if !includeUpdated && col.IsUpdated { + continue + } + if !includeAutoIncr && col.IsAutoIncrement { + continue + } + + 
if statement.dialect.URI().DBType == schemas.MSSQL && (col.SQLType.Name == schemas.Text || + col.SQLType.IsBlob() || col.SQLType.Name == schemas.TimeStampz) { + continue + } + if col.SQLType.IsJson() { + continue + } + + var colName string + if addedTableName { + var nm = tableName + if len(aliasName) > 0 { + nm = aliasName + } + colName = statement.quote(nm) + "." + statement.quote(col.Name) + } else { + colName = statement.quote(col.Name) + } + + fieldValuePtr, err := col.ValueOf(bean) + if err != nil { + if !strings.Contains(err.Error(), "is not valid") { + //engine.logger.Warn(err) + } + continue + } + + if col.IsDeleted && !unscoped { // tag "deleted" is enabled + conds = append(conds, statement.CondDeleted(col)) + } + + fieldValue := *fieldValuePtr + if fieldValue.Interface() == nil { + continue + } + + fieldType := reflect.TypeOf(fieldValue.Interface()) + requiredField := useAllCols + + if b, ok := getFlagForColumn(mustColumnMap, col); ok { + if b { + requiredField = true + } else { + continue + } + } + + if fieldType.Kind() == reflect.Ptr { + if fieldValue.IsNil() { + if includeNil { + conds = append(conds, builder.Eq{colName: nil}) + } + continue + } else if !fieldValue.IsValid() { + continue + } else { + // dereference ptr type to instance type + fieldValue = fieldValue.Elem() + fieldType = reflect.TypeOf(fieldValue.Interface()) + requiredField = true + } + } + + var val interface{} + switch fieldType.Kind() { + case reflect.Bool: + if allUseBool || requiredField { + val = fieldValue.Interface() + } else { + // if a bool in a struct, it will not be as a condition because it default is false, + // please use Where() instead + continue + } + case reflect.String: + if !requiredField && fieldValue.String() == "" { + continue + } + // for MyString, should convert to string or panic + if fieldType.String() != reflect.String.String() { + val = fieldValue.String() + } else { + val = fieldValue.Interface() + } + case reflect.Int8, reflect.Int16, reflect.Int, reflect.Int32, reflect.Int64: + if !requiredField && fieldValue.Int() == 0 { + continue + } + val = fieldValue.Interface() + case reflect.Float32, reflect.Float64: + if !requiredField && fieldValue.Float() == 0.0 { + continue + } + val = fieldValue.Interface() + case reflect.Uint8, reflect.Uint16, reflect.Uint, reflect.Uint32, reflect.Uint64: + if !requiredField && fieldValue.Uint() == 0 { + continue + } + val = fieldValue.Interface() + case reflect.Struct: + if fieldType.ConvertibleTo(schemas.TimeType) { + t := fieldValue.Convert(schemas.TimeType).Interface().(time.Time) + if !requiredField && (t.IsZero() || !fieldValue.IsValid()) { + continue + } + val = dialects.FormatColumnTime(statement.dialect, statement.defaultTimeZone, col, t) + } else if _, ok := reflect.New(fieldType).Interface().(convert.Conversion); ok { + continue + } else if valNul, ok := fieldValue.Interface().(driver.Valuer); ok { + val, _ = valNul.Value() + if val == nil && !requiredField { + continue + } + } else { + if col.SQLType.IsJson() { + if col.SQLType.IsText() { + bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface()) + if err != nil { + return nil, err + } + val = string(bytes) + } else if col.SQLType.IsBlob() { + var bytes []byte + var err error + bytes, err = json.DefaultJSONHandler.Marshal(fieldValue.Interface()) + if err != nil { + return nil, err + } + val = bytes + } + } else { + table, err := statement.tagParser.ParseWithCache(fieldValue) + if err != nil { + val = fieldValue.Interface() + } else { + if len(table.PrimaryKeys) == 1 { + 
pkField := reflect.Indirect(fieldValue).FieldByName(table.PKColumns()[0].FieldName) + // fix non-int pk issues + //if pkField.Int() != 0 { + if pkField.IsValid() && !utils.IsZero(pkField.Interface()) { + val = pkField.Interface() + } else { + continue + } + } else { + //TODO: how to handler? + return nil, fmt.Errorf("not supported %v as %v", fieldValue.Interface(), table.PrimaryKeys) + } + } + } + } + case reflect.Array: + continue + case reflect.Slice, reflect.Map: + if fieldValue == reflect.Zero(fieldType) { + continue + } + if fieldValue.IsNil() || !fieldValue.IsValid() || fieldValue.Len() == 0 { + continue + } + + if col.SQLType.IsText() { + bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface()) + if err != nil { + return nil, err + } + val = string(bytes) + } else if col.SQLType.IsBlob() { + var bytes []byte + var err error + if (fieldType.Kind() == reflect.Array || fieldType.Kind() == reflect.Slice) && + fieldType.Elem().Kind() == reflect.Uint8 { + if fieldValue.Len() > 0 { + val = fieldValue.Bytes() + } else { + continue + } + } else { + bytes, err = json.DefaultJSONHandler.Marshal(fieldValue.Interface()) + if err != nil { + return nil, err + } + val = bytes + } + } else { + continue + } + default: + val = fieldValue.Interface() + } + + conds = append(conds, builder.Eq{colName: val}) + } + + return builder.And(conds...), nil +} + +func (statement *Statement) BuildConds(table *schemas.Table, bean interface{}, includeVersion bool, includeUpdated bool, includeNil bool, includeAutoIncr bool, addedTableName bool) (builder.Cond, error) { + return statement.buildConds2(table, bean, includeVersion, includeUpdated, includeNil, includeAutoIncr, statement.allUseBool, statement.useAllCols, + statement.unscoped, statement.MustColumnMap, statement.TableName(), statement.TableAlias, addedTableName) +} + +func (statement *Statement) mergeConds(bean interface{}) error { + if !statement.NoAutoCondition && statement.RefTable != nil { + var addedTableName = (len(statement.JoinStr) > 0) + autoCond, err := statement.BuildConds(statement.RefTable, bean, true, true, false, true, addedTableName) + if err != nil { + return err + } + statement.cond = statement.cond.And(autoCond) + } + + if err := statement.ProcessIDParam(); err != nil { + return err + } + return nil +} + +func (statement *Statement) GenConds(bean interface{}) (string, []interface{}, error) { + if err := statement.mergeConds(bean); err != nil { + return "", nil, err + } + + return statement.GenCondSQL(statement.cond) +} + +func (statement *Statement) quoteColumnStr(columnStr string) string { + columns := strings.Split(columnStr, ",") + return statement.dialect.Quoter().Join(columns, ",") +} + +func (statement *Statement) ConvertSQLOrArgs(sqlOrArgs ...interface{}) (string, []interface{}, error) { + sql, args, err := convertSQLOrArgs(sqlOrArgs...) 
+ if err != nil { + return "", nil, err + } + return statement.ReplaceQuote(sql), args, nil +} + +func convertSQLOrArgs(sqlOrArgs ...interface{}) (string, []interface{}, error) { + switch sqlOrArgs[0].(type) { + case string: + return sqlOrArgs[0].(string), sqlOrArgs[1:], nil + case *builder.Builder: + return sqlOrArgs[0].(*builder.Builder).ToSQL() + case builder.Builder: + bd := sqlOrArgs[0].(builder.Builder) + return bd.ToSQL() + } + + return "", nil, ErrUnSupportedType +} + +func (statement *Statement) joinColumns(cols []*schemas.Column, includeTableName bool) string { + var colnames = make([]string, len(cols)) + for i, col := range cols { + if includeTableName { + colnames[i] = statement.quote(statement.TableName()) + + "." + statement.quote(col.Name) + } else { + colnames[i] = statement.quote(col.Name) + } + } + return strings.Join(colnames, ", ") +} + +// CondDeleted returns the conditions whether a record is soft deleted. +func (statement *Statement) CondDeleted(col *schemas.Column) builder.Cond { + var colName = col.Name + if statement.JoinStr != "" { + var prefix string + if statement.TableAlias != "" { + prefix = statement.TableAlias + } else { + prefix = statement.TableName() + } + colName = statement.quote(prefix) + "." + statement.quote(col.Name) + } + var cond = builder.NewCond() + if col.SQLType.IsNumeric() { + cond = builder.Eq{colName: 0} + } else { + // FIXME: mssql: The conversion of a nvarchar data type to a datetime data type resulted in an out-of-range value. + if statement.dialect.URI().DBType != schemas.MSSQL { + cond = builder.Eq{colName: utils.ZeroTime1} + } + } + + if col.Nullable { + cond = cond.Or(builder.IsNull{colName}) + } + + return cond +} diff --git a/vendor/xorm.io/xorm/internal/statements/statement_args.go b/vendor/xorm.io/xorm/internal/statements/statement_args.go new file mode 100644 index 0000000..dc14467 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/statement_args.go @@ -0,0 +1,132 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package statements + +import ( + "fmt" + "reflect" + "strings" + "time" + + "xorm.io/builder" + "xorm.io/xorm/schemas" +) + +func quoteNeeded(a interface{}) bool { + switch a.(type) { + case int, int8, int16, int32, int64: + return false + case uint, uint8, uint16, uint32, uint64: + return false + case float32, float64: + return false + case bool: + return false + case string: + return true + case time.Time, *time.Time: + return true + case builder.Builder, *builder.Builder: + return false + } + + t := reflect.TypeOf(a) + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return false + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return false + case reflect.Float32, reflect.Float64: + return false + case reflect.Bool: + return false + case reflect.String: + return true + } + + return true +} + +func convertStringSingleQuote(arg string) string { + return "'" + strings.Replace(arg, "'", "''", -1) + "'" +} + +func convertString(arg string) string { + var buf strings.Builder + buf.WriteRune('\'') + for _, c := range arg { + if c == '\\' || c == '\'' { + buf.WriteRune('\\') + } + buf.WriteRune(c) + } + buf.WriteRune('\'') + return buf.String() +} + +func convertArg(arg interface{}, convertFunc func(string) string) string { + if quoteNeeded(arg) { + argv := fmt.Sprintf("%v", arg) + return convertFunc(argv) + } + + return fmt.Sprintf("%v", arg) +} + +const insertSelectPlaceHolder = true + +func (statement *Statement) WriteArg(w *builder.BytesWriter, arg interface{}) error { + switch argv := arg.(type) { + case *builder.Builder: + if _, err := w.WriteString("("); err != nil { + return err + } + if err := argv.WriteTo(w); err != nil { + return err + } + if _, err := w.WriteString(")"); err != nil { + return err + } + default: + if insertSelectPlaceHolder { + if err := w.WriteByte('?'); err != nil { + return err + } + if v, ok := arg.(bool); ok && statement.dialect.URI().DBType == schemas.MSSQL { + if v { + w.Append(1) + } else { + w.Append(0) + } + } else { + w.Append(arg) + } + } else { + var convertFunc = convertStringSingleQuote + if statement.dialect.URI().DBType == schemas.MYSQL { + convertFunc = convertString + } + if _, err := w.WriteString(convertArg(arg, convertFunc)); err != nil { + return err + } + } + } + return nil +} + +func (statement *Statement) WriteArgs(w *builder.BytesWriter, args []interface{}) error { + for i, arg := range args { + if err := statement.WriteArg(w, arg); err != nil { + return err + } + + if i+1 != len(args) { + if _, err := w.WriteString(","); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/xorm.io/xorm/internal/statements/update.go b/vendor/xorm.io/xorm/internal/statements/update.go new file mode 100644 index 0000000..b6ae118 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/update.go @@ -0,0 +1,294 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
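Before update.go's body, a quick illustration of the argument-inlining path shown in statement_args.go above: numeric and bool arguments are written bare, while strings are single-quoted with embedded quotes doubled. This is a stand-alone sketch for illustration only, since the real functions are unexported; the simplified needsQuoting below omits the reflection fallback that quoteNeeded has.

package main

import (
	"fmt"
	"strings"
)

// needsQuoting mirrors quoteNeeded above for the common concrete types;
// the real function also falls back to reflection for named types.
func needsQuoting(a interface{}) bool {
	switch a.(type) {
	case int, int8, int16, int32, int64,
		uint, uint8, uint16, uint32, uint64,
		float32, float64, bool:
		return false
	}
	return true // strings, times, and everything else get quoted
}

// singleQuote doubles embedded quotes, as convertStringSingleQuote does.
func singleQuote(s string) string {
	return "'" + strings.ReplaceAll(s, "'", "''") + "'"
}

func main() {
	for _, arg := range []interface{}{42, true, "O'Brien"} {
		if needsQuoting(arg) {
			fmt.Println(singleQuote(fmt.Sprintf("%v", arg)))
		} else {
			fmt.Println(arg)
		}
	}
	// Output:
	// 42
	// true
	// 'O''Brien'
}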
+
+package statements
+
+import (
+	"database/sql/driver"
+	"errors"
+	"fmt"
+	"reflect"
+	"time"
+
+	"xorm.io/xorm/convert"
+	"xorm.io/xorm/dialects"
+	"xorm.io/xorm/internal/json"
+	"xorm.io/xorm/internal/utils"
+	"xorm.io/xorm/schemas"
+)
+
+func (statement *Statement) ifAddColUpdate(col *schemas.Column, includeVersion, includeUpdated, includeNil,
+	includeAutoIncr, update bool) (bool, error) {
+	columnMap := statement.ColumnMap
+	omitColumnMap := statement.OmitColumnMap
+	unscoped := statement.unscoped
+
+	if !includeVersion && col.IsVersion {
+		return false, nil
+	}
+	if col.IsCreated && !columnMap.Contain(col.Name) {
+		return false, nil
+	}
+	if !includeUpdated && col.IsUpdated {
+		return false, nil
+	}
+	if !includeAutoIncr && col.IsAutoIncrement {
+		return false, nil
+	}
+	if col.IsDeleted && !unscoped {
+		return false, nil
+	}
+	if omitColumnMap.Contain(col.Name) {
+		return false, nil
+	}
+	if len(columnMap) > 0 && !columnMap.Contain(col.Name) {
+		return false, nil
+	}
+
+	if col.MapType == schemas.ONLYFROMDB {
+		return false, nil
+	}
+
+	if statement.IncrColumns.IsColExist(col.Name) {
+		return false, nil
+	} else if statement.DecrColumns.IsColExist(col.Name) {
+		return false, nil
+	} else if statement.ExprColumns.IsColExist(col.Name) {
+		return false, nil
+	}
+
+	return true, nil
+}
+
+// BuildUpdates automatically generates update columns and values according to a struct
+func (statement *Statement) BuildUpdates(tableValue reflect.Value,
+	includeVersion, includeUpdated, includeNil,
+	includeAutoIncr, update bool) ([]string, []interface{}, error) {
+	table := statement.RefTable
+	allUseBool := statement.allUseBool
+	useAllCols := statement.useAllCols
+	mustColumnMap := statement.MustColumnMap
+	nullableMap := statement.NullableMap
+
+	var colNames = make([]string, 0)
+	var args = make([]interface{}, 0)
+
+	for _, col := range table.Columns() {
+		ok, err := statement.ifAddColUpdate(col, includeVersion, includeUpdated, includeNil,
+			includeAutoIncr, update)
+		if err != nil {
+			return nil, nil, err
+		}
+		if !ok {
+			continue
+		}
+
+		fieldValuePtr, err := col.ValueOfV(&tableValue)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		fieldValue := *fieldValuePtr
+		fieldType := reflect.TypeOf(fieldValue.Interface())
+		if fieldType == nil {
+			continue
+		}
+
+		requiredField := useAllCols
+		includeNil := useAllCols
+
+		if b, ok := getFlagForColumn(mustColumnMap, col); ok {
+			if b {
+				requiredField = true
+			} else {
+				continue
+			}
+		}
+
+		// !evalphobia!
set fieldValue as nil when column is nullable and zero-value + if b, ok := getFlagForColumn(nullableMap, col); ok { + if b && col.Nullable && utils.IsZero(fieldValue.Interface()) { + var nilValue *int + fieldValue = reflect.ValueOf(nilValue) + fieldType = reflect.TypeOf(fieldValue.Interface()) + includeNil = true + } + } + + var val interface{} + + if fieldValue.CanAddr() { + if structConvert, ok := fieldValue.Addr().Interface().(convert.Conversion); ok { + data, err := structConvert.ToDB() + if err != nil { + return nil, nil, err + } + + val = data + goto APPEND + } + } + + if structConvert, ok := fieldValue.Interface().(convert.Conversion); ok { + data, err := structConvert.ToDB() + if err != nil { + return nil, nil, err + } + + val = data + goto APPEND + } + + if fieldType.Kind() == reflect.Ptr { + if fieldValue.IsNil() { + if includeNil { + args = append(args, nil) + colNames = append(colNames, fmt.Sprintf("%v=?", statement.quote(col.Name))) + } + continue + } else if !fieldValue.IsValid() { + continue + } else { + // dereference ptr type to instance type + fieldValue = fieldValue.Elem() + fieldType = reflect.TypeOf(fieldValue.Interface()) + requiredField = true + } + } + + switch fieldType.Kind() { + case reflect.Bool: + if allUseBool || requiredField { + val = fieldValue.Interface() + } else { + // if a bool in a struct, it will not be as a condition because it default is false, + // please use Where() instead + continue + } + case reflect.String: + if !requiredField && fieldValue.String() == "" { + continue + } + // for MyString, should convert to string or panic + if fieldType.String() != reflect.String.String() { + val = fieldValue.String() + } else { + val = fieldValue.Interface() + } + case reflect.Int8, reflect.Int16, reflect.Int, reflect.Int32, reflect.Int64: + if !requiredField && fieldValue.Int() == 0 { + continue + } + val = fieldValue.Interface() + case reflect.Float32, reflect.Float64: + if !requiredField && fieldValue.Float() == 0.0 { + continue + } + val = fieldValue.Interface() + case reflect.Uint8, reflect.Uint16, reflect.Uint, reflect.Uint32, reflect.Uint64: + if !requiredField && fieldValue.Uint() == 0 { + continue + } + val = fieldValue.Interface() + case reflect.Struct: + if fieldType.ConvertibleTo(schemas.TimeType) { + t := fieldValue.Convert(schemas.TimeType).Interface().(time.Time) + if !requiredField && (t.IsZero() || !fieldValue.IsValid()) { + continue + } + val = dialects.FormatColumnTime(statement.dialect, statement.defaultTimeZone, col, t) + } else if nulType, ok := fieldValue.Interface().(driver.Valuer); ok { + val, _ = nulType.Value() + if val == nil && !requiredField { + continue + } + } else { + if !col.SQLType.IsJson() { + table, err := statement.tagParser.ParseWithCache(fieldValue) + if err != nil { + val = fieldValue.Interface() + } else { + if len(table.PrimaryKeys) == 1 { + pkField := reflect.Indirect(fieldValue).FieldByName(table.PKColumns()[0].FieldName) + // fix non-int pk issues + if pkField.IsValid() && (!requiredField && !utils.IsZero(pkField.Interface())) { + val = pkField.Interface() + } else { + continue + } + } else { + return nil, nil, errors.New("Not supported multiple primary keys") + } + } + } else { + // Blank struct could not be as update data + if requiredField || !utils.IsStructZero(fieldValue) { + bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface()) + if err != nil { + return nil, nil, fmt.Errorf("mashal %v failed", fieldValue.Interface()) + } + if col.SQLType.IsText() { + val = string(bytes) + } else if 
col.SQLType.IsBlob() { + val = bytes + } + } else { + continue + } + } + } + case reflect.Array, reflect.Slice, reflect.Map: + if !requiredField { + if fieldValue == reflect.Zero(fieldType) { + continue + } + if fieldType.Kind() == reflect.Array { + if utils.IsArrayZero(fieldValue) { + continue + } + } else if fieldValue.IsNil() || !fieldValue.IsValid() || fieldValue.Len() == 0 { + continue + } + } + + if col.SQLType.IsText() { + bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface()) + if err != nil { + return nil, nil, err + } + val = string(bytes) + } else if col.SQLType.IsBlob() { + var bytes []byte + var err error + if fieldType.Kind() == reflect.Slice && + fieldType.Elem().Kind() == reflect.Uint8 { + if fieldValue.Len() > 0 { + val = fieldValue.Bytes() + } else { + continue + } + } else if fieldType.Kind() == reflect.Array && + fieldType.Elem().Kind() == reflect.Uint8 { + val = fieldValue.Slice(0, 0).Interface() + } else { + bytes, err = json.DefaultJSONHandler.Marshal(fieldValue.Interface()) + if err != nil { + return nil, nil, err + } + val = bytes + } + } else { + continue + } + default: + val = fieldValue.Interface() + } + + APPEND: + args = append(args, val) + colNames = append(colNames, fmt.Sprintf("%v = ?", statement.quote(col.Name))) + } + + return colNames, args, nil +} diff --git a/vendor/xorm.io/xorm/internal/statements/values.go b/vendor/xorm.io/xorm/internal/statements/values.go new file mode 100644 index 0000000..a1102c5 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/values.go @@ -0,0 +1,154 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package statements + +import ( + "database/sql" + "database/sql/driver" + "fmt" + "reflect" + "time" + + "xorm.io/xorm/convert" + "xorm.io/xorm/dialects" + "xorm.io/xorm/internal/json" + "xorm.io/xorm/schemas" +) + +var ( + nullFloatType = reflect.TypeOf(sql.NullFloat64{}) +) + +// Value2Interface convert a field value of a struct to interface for puting into database +func (statement *Statement) Value2Interface(col *schemas.Column, fieldValue reflect.Value) (interface{}, error) { + if fieldValue.CanAddr() { + if fieldConvert, ok := fieldValue.Addr().Interface().(convert.Conversion); ok { + data, err := fieldConvert.ToDB() + if err != nil { + return nil, err + } + if col.SQLType.IsBlob() { + return data, nil + } + return string(data), nil + } + } + + if fieldConvert, ok := fieldValue.Interface().(convert.Conversion); ok { + data, err := fieldConvert.ToDB() + if err != nil { + return nil, err + } + if col.SQLType.IsBlob() { + return data, nil + } + if nil == data { + return nil, nil + } + return string(data), nil + } + + fieldType := fieldValue.Type() + k := fieldType.Kind() + if k == reflect.Ptr { + if fieldValue.IsNil() { + return nil, nil + } else if !fieldValue.IsValid() { + return nil, nil + } else { + // !nashtsai! 
dereference pointer type to instance type
+			fieldValue = fieldValue.Elem()
+			fieldType = fieldValue.Type()
+			k = fieldType.Kind()
+		}
+	}
+
+	switch k {
+	case reflect.Bool:
+		return fieldValue.Bool(), nil
+	case reflect.String:
+		return fieldValue.String(), nil
+	case reflect.Struct:
+		if fieldType.ConvertibleTo(schemas.TimeType) {
+			t := fieldValue.Convert(schemas.TimeType).Interface().(time.Time)
+			tf := dialects.FormatColumnTime(statement.dialect, statement.defaultTimeZone, col, t)
+			return tf, nil
+		} else if fieldType.ConvertibleTo(nullFloatType) {
+			t := fieldValue.Convert(nullFloatType).Interface().(sql.NullFloat64)
+			if !t.Valid {
+				return nil, nil
+			}
+			return t.Float64, nil
+		}
+
+		if !col.SQLType.IsJson() {
+			// !! also support structs implementing the driver.Valuer interface, such as sql.NullString
+			if v, ok := fieldValue.Interface().(driver.Valuer); ok {
+				return v.Value()
+			}
+
+			fieldTable, err := statement.tagParser.ParseWithCache(fieldValue)
+			if err != nil {
+				return nil, err
+			}
+			if len(fieldTable.PrimaryKeys) == 1 {
+				pkField := reflect.Indirect(fieldValue).FieldByName(fieldTable.PKColumns()[0].FieldName)
+				return pkField.Interface(), nil
+			}
+			return nil, fmt.Errorf("no primary key for col %v", col.Name)
+		}
+
+		if col.SQLType.IsText() {
+			bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface())
+			if err != nil {
+				return nil, err
+			}
+			return string(bytes), nil
+		} else if col.SQLType.IsBlob() {
+			bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface())
+			if err != nil {
+				return nil, err
+			}
+			return bytes, nil
+		}
+		return nil, fmt.Errorf("Unsupported type %v", fieldValue.Type())
+	case reflect.Complex64, reflect.Complex128:
+		bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface())
+		if err != nil {
+			return nil, err
+		}
+		return string(bytes), nil
+	case reflect.Array, reflect.Slice, reflect.Map:
+		if !fieldValue.IsValid() {
+			return fieldValue.Interface(), nil
+		}
+
+		if col.SQLType.IsText() {
+			bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface())
+			if err != nil {
+				return nil, err
+			}
+			return string(bytes), nil
+		} else if col.SQLType.IsBlob() {
+			var bytes []byte
+			var err error
+			if (k == reflect.Slice) &&
+				(fieldValue.Type().Elem().Kind() == reflect.Uint8) {
+				bytes = fieldValue.Bytes()
+			} else {
+				bytes, err = json.DefaultJSONHandler.Marshal(fieldValue.Interface())
+				if err != nil {
+					return nil, err
+				}
+			}
+			return bytes, nil
+		}
+		return nil, ErrUnSupportedType
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		return fieldValue.Uint(), nil
+	default:
+		return fieldValue.Interface(), nil
+	}
+}
diff --git a/vendor/xorm.io/xorm/internal/utils/name.go b/vendor/xorm.io/xorm/internal/utils/name.go
new file mode 100644
index 0000000..f5fc3ff
--- /dev/null
+++ b/vendor/xorm.io/xorm/internal/utils/name.go
@@ -0,0 +1,13 @@
+// Copyright 2020 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package utils
+
+import (
+	"fmt"
+)
+
+func IndexName(tableName, idxName string) string {
+	return fmt.Sprintf("IDX_%v_%v", tableName, idxName)
+}
diff --git a/vendor/xorm.io/xorm/internal/utils/reflect.go b/vendor/xorm.io/xorm/internal/utils/reflect.go
new file mode 100644
index 0000000..3dad6bf
--- /dev/null
+++ b/vendor/xorm.io/xorm/internal/utils/reflect.go
@@ -0,0 +1,13 @@
+// Copyright 2020 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package utils + +import ( + "reflect" +) + +func ReflectValue(bean interface{}) reflect.Value { + return reflect.Indirect(reflect.ValueOf(bean)) +} diff --git a/vendor/xorm.io/xorm/internal/utils/slice.go b/vendor/xorm.io/xorm/internal/utils/slice.go new file mode 100644 index 0000000..8968570 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/utils/slice.go @@ -0,0 +1,22 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package utils + +import "sort" + +// SliceEq return true if two slice have the same elements even if different sort. +func SliceEq(left, right []string) bool { + if len(left) != len(right) { + return false + } + sort.Sort(sort.StringSlice(left)) + sort.Sort(sort.StringSlice(right)) + for i := 0; i < len(left); i++ { + if left[i] != right[i] { + return false + } + } + return true +} diff --git a/vendor/xorm.io/xorm/internal/utils/sql.go b/vendor/xorm.io/xorm/internal/utils/sql.go new file mode 100644 index 0000000..5e68c4a --- /dev/null +++ b/vendor/xorm.io/xorm/internal/utils/sql.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package utils + +import ( + "strings" +) + +func IsSubQuery(tbName string) bool { + const selStr = "select" + if len(tbName) <= len(selStr)+1 { + return false + } + + return strings.EqualFold(tbName[:len(selStr)], selStr) || + strings.EqualFold(tbName[:len(selStr)+1], "("+selStr) +} diff --git a/vendor/xorm.io/xorm/internal/utils/strings.go b/vendor/xorm.io/xorm/internal/utils/strings.go new file mode 100644 index 0000000..7246670 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/utils/strings.go @@ -0,0 +1,29 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package utils + +import ( + "strings" +) + +func IndexNoCase(s, sep string) int { + return strings.Index(strings.ToLower(s), strings.ToLower(sep)) +} + +func SplitNoCase(s, sep string) []string { + idx := IndexNoCase(s, sep) + if idx < 0 { + return []string{s} + } + return strings.Split(s, s[idx:idx+len(sep)]) +} + +func SplitNNoCase(s, sep string, n int) []string { + idx := IndexNoCase(s, sep) + if idx < 0 { + return []string{s} + } + return strings.SplitN(s, s[idx:idx+len(sep)], n) +} diff --git a/vendor/xorm.io/xorm/internal/utils/zero.go b/vendor/xorm.io/xorm/internal/utils/zero.go new file mode 100644 index 0000000..8f033c6 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/utils/zero.go @@ -0,0 +1,145 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
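One note on the helpers just added: IsSubQuery in utils/sql.go above is what keeps Statement.Join from quoting a derived table as though it were a plain table name. Since the internal/utils package cannot be imported from outside xorm, the sketch below carries a verbatim copy purely for illustration; the table names are invented.

package main

import (
	"fmt"
	"strings"
)

// isSubQuery is a stand-alone copy of utils.IsSubQuery shown above,
// reproduced here only because the internal package is unimportable.
func isSubQuery(tbName string) bool {
	const selStr = "select"
	if len(tbName) <= len(selStr)+1 {
		return false
	}
	return strings.EqualFold(tbName[:len(selStr)], selStr) ||
		strings.EqualFold(tbName[:len(selStr)+1], "("+selStr)
}

func main() {
	fmt.Println(isSubQuery("SELECT id FROM coronadata")) // true: starts with select
	fmt.Println(isSubQuery("(select 1)"))                // true: parenthesized subquery
	fmt.Println(isSubQuery("coronadata"))                // false: a plain table name
}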
+
+package utils
+
+import (
+	"reflect"
+	"time"
+)
+
+type Zeroable interface {
+	IsZero() bool
+}
+
+var nilTime *time.Time
+
+// IsZero returns true if k is nil or has a zero value
+func IsZero(k interface{}) bool {
+	if k == nil {
+		return true
+	}
+
+	switch k.(type) {
+	case int:
+		return k.(int) == 0
+	case int8:
+		return k.(int8) == 0
+	case int16:
+		return k.(int16) == 0
+	case int32:
+		return k.(int32) == 0
+	case int64:
+		return k.(int64) == 0
+	case uint:
+		return k.(uint) == 0
+	case uint8:
+		return k.(uint8) == 0
+	case uint16:
+		return k.(uint16) == 0
+	case uint32:
+		return k.(uint32) == 0
+	case uint64:
+		return k.(uint64) == 0
+	case float32:
+		return k.(float32) == 0
+	case float64:
+		return k.(float64) == 0
+	case bool:
+		return !k.(bool)
+	case string:
+		return k.(string) == ""
+	case *time.Time:
+		return k.(*time.Time) == nilTime || IsTimeZero(*k.(*time.Time))
+	case time.Time:
+		return IsTimeZero(k.(time.Time))
+	case Zeroable:
+		return k.(Zeroable) == nil || k.(Zeroable).IsZero()
+	case reflect.Value: // for Go versions below 1.13, where reflect.Value has no IsZero method
+		return IsValueZero(k.(reflect.Value))
+	}
+
+	return IsValueZero(reflect.ValueOf(k))
+}
+
+var zeroType = reflect.TypeOf((*Zeroable)(nil)).Elem()
+
+func IsValueZero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Slice:
+		return v.IsNil()
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint, reflect.Uint64:
+		return v.Uint() == 0
+	case reflect.String:
+		return v.Len() == 0
+	case reflect.Ptr:
+		if v.IsNil() {
+			return true
+		}
+		return IsValueZero(v.Elem())
+	case reflect.Struct:
+		return IsStructZero(v)
+	case reflect.Array:
+		return IsArrayZero(v)
+	}
+	return false
+}
+
+func IsStructZero(v reflect.Value) bool {
+	if !v.IsValid() || v.NumField() == 0 {
+		return true
+	}
+
+	if v.Type().Implements(zeroType) {
+		f := v.MethodByName("IsZero")
+		if f.IsValid() {
+			res := f.Call(nil)
+			return len(res) == 1 && res[0].Bool()
+		}
+	}
+
+	for i := 0; i < v.NumField(); i++ {
+		field := v.Field(i)
+		switch field.Kind() {
+		case reflect.Ptr:
+			field = field.Elem()
+			fallthrough
+		case reflect.Struct:
+			if !IsStructZero(field) {
+				return false
+			}
+		default:
+			if field.CanInterface() && !IsZero(field.Interface()) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+func IsArrayZero(v reflect.Value) bool {
+	if !v.IsValid() || v.Len() == 0 {
+		return true
+	}
+
+	for i := 0; i < v.Len(); i++ {
+		if !IsZero(v.Index(i).Interface()) {
+			return false
+		}
+	}
+
+	return true
+}
+
+const (
+	ZeroTime0 = "0000-00-00 00:00:00"
+	ZeroTime1 = "0001-01-01 00:00:00"
+)
+
+func IsTimeZero(t time.Time) bool {
+	return t.IsZero() || t.Format("2006-01-02 15:04:05") == ZeroTime0 ||
+		t.Format("2006-01-02 15:04:05") == ZeroTime1
+}
diff --git a/vendor/xorm.io/xorm/log/logger.go b/vendor/xorm.io/xorm/log/logger.go
new file mode 100644
index 0000000..eeb6369
--- /dev/null
+++ b/vendor/xorm.io/xorm/log/logger.go
@@ -0,0 +1,217 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+	"fmt"
+	"io"
+	"log"
+)
+
+// LogLevel defines a log level
+type LogLevel int
+
+// enumerate all LogLevels
+const (
+	// !nashtsai!
following level also match syslog.Priority value + LOG_DEBUG LogLevel = iota + LOG_INFO + LOG_WARNING + LOG_ERR + LOG_OFF + LOG_UNKNOWN +) + +// default log options +const ( + DEFAULT_LOG_PREFIX = "[xorm]" + DEFAULT_LOG_FLAG = log.Ldate | log.Lmicroseconds + DEFAULT_LOG_LEVEL = LOG_DEBUG +) + +// Logger is a logger interface +type Logger interface { + Debug(v ...interface{}) + Debugf(format string, v ...interface{}) + Error(v ...interface{}) + Errorf(format string, v ...interface{}) + Info(v ...interface{}) + Infof(format string, v ...interface{}) + Warn(v ...interface{}) + Warnf(format string, v ...interface{}) + + Level() LogLevel + SetLevel(l LogLevel) + + ShowSQL(show ...bool) + IsShowSQL() bool +} + +var _ Logger = DiscardLogger{} + +// DiscardLogger don't log implementation for ILogger +type DiscardLogger struct{} + +// Debug empty implementation +func (DiscardLogger) Debug(v ...interface{}) {} + +// Debugf empty implementation +func (DiscardLogger) Debugf(format string, v ...interface{}) {} + +// Error empty implementation +func (DiscardLogger) Error(v ...interface{}) {} + +// Errorf empty implementation +func (DiscardLogger) Errorf(format string, v ...interface{}) {} + +// Info empty implementation +func (DiscardLogger) Info(v ...interface{}) {} + +// Infof empty implementation +func (DiscardLogger) Infof(format string, v ...interface{}) {} + +// Warn empty implementation +func (DiscardLogger) Warn(v ...interface{}) {} + +// Warnf empty implementation +func (DiscardLogger) Warnf(format string, v ...interface{}) {} + +// Level empty implementation +func (DiscardLogger) Level() LogLevel { + return LOG_UNKNOWN +} + +// SetLevel empty implementation +func (DiscardLogger) SetLevel(l LogLevel) {} + +// ShowSQL empty implementation +func (DiscardLogger) ShowSQL(show ...bool) {} + +// IsShowSQL empty implementation +func (DiscardLogger) IsShowSQL() bool { + return false +} + +// SimpleLogger is the default implment of ILogger +type SimpleLogger struct { + DEBUG *log.Logger + ERR *log.Logger + INFO *log.Logger + WARN *log.Logger + level LogLevel + showSQL bool +} + +var _ Logger = &SimpleLogger{} + +// NewSimpleLogger use a special io.Writer as logger output +func NewSimpleLogger(out io.Writer) *SimpleLogger { + return NewSimpleLogger2(out, DEFAULT_LOG_PREFIX, DEFAULT_LOG_FLAG) +} + +// NewSimpleLogger2 let you customrize your logger prefix and flag +func NewSimpleLogger2(out io.Writer, prefix string, flag int) *SimpleLogger { + return NewSimpleLogger3(out, prefix, flag, DEFAULT_LOG_LEVEL) +} + +// NewSimpleLogger3 let you customrize your logger prefix and flag and logLevel +func NewSimpleLogger3(out io.Writer, prefix string, flag int, l LogLevel) *SimpleLogger { + return &SimpleLogger{ + DEBUG: log.New(out, fmt.Sprintf("%s [debug] ", prefix), flag), + ERR: log.New(out, fmt.Sprintf("%s [error] ", prefix), flag), + INFO: log.New(out, fmt.Sprintf("%s [info] ", prefix), flag), + WARN: log.New(out, fmt.Sprintf("%s [warn] ", prefix), flag), + level: l, + } +} + +// Error implement ILogger +func (s *SimpleLogger) Error(v ...interface{}) { + if s.level <= LOG_ERR { + s.ERR.Output(2, fmt.Sprintln(v...)) + } + return +} + +// Errorf implement ILogger +func (s *SimpleLogger) Errorf(format string, v ...interface{}) { + if s.level <= LOG_ERR { + s.ERR.Output(2, fmt.Sprintf(format, v...)) + } + return +} + +// Debug implement ILogger +func (s *SimpleLogger) Debug(v ...interface{}) { + if s.level <= LOG_DEBUG { + s.DEBUG.Output(2, fmt.Sprintln(v...)) + } + return +} + +// Debugf implement ILogger +func 
(s *SimpleLogger) Debugf(format string, v ...interface{}) { + if s.level <= LOG_DEBUG { + s.DEBUG.Output(2, fmt.Sprintf(format, v...)) + } + return +} + +// Info implement ILogger +func (s *SimpleLogger) Info(v ...interface{}) { + if s.level <= LOG_INFO { + s.INFO.Output(2, fmt.Sprintln(v...)) + } + return +} + +// Infof implement ILogger +func (s *SimpleLogger) Infof(format string, v ...interface{}) { + if s.level <= LOG_INFO { + s.INFO.Output(2, fmt.Sprintf(format, v...)) + } + return +} + +// Warn implement ILogger +func (s *SimpleLogger) Warn(v ...interface{}) { + if s.level <= LOG_WARNING { + s.WARN.Output(2, fmt.Sprintln(v...)) + } + return +} + +// Warnf implement ILogger +func (s *SimpleLogger) Warnf(format string, v ...interface{}) { + if s.level <= LOG_WARNING { + s.WARN.Output(2, fmt.Sprintf(format, v...)) + } + return +} + +// Level implement ILogger +func (s *SimpleLogger) Level() LogLevel { + return s.level +} + +// SetLevel implement ILogger +func (s *SimpleLogger) SetLevel(l LogLevel) { + s.level = l + return +} + +// ShowSQL implement ILogger +func (s *SimpleLogger) ShowSQL(show ...bool) { + if len(show) == 0 { + s.showSQL = true + return + } + s.showSQL = show[0] +} + +// IsShowSQL implement ILogger +func (s *SimpleLogger) IsShowSQL() bool { + return s.showSQL +} diff --git a/vendor/xorm.io/xorm/log/logger_context.go b/vendor/xorm.io/xorm/log/logger_context.go new file mode 100644 index 0000000..4680257 --- /dev/null +++ b/vendor/xorm.io/xorm/log/logger_context.go @@ -0,0 +1,116 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package log + +import ( + "fmt" + + "xorm.io/xorm/contexts" +) + +// LogContext represents a log context +type LogContext contexts.ContextHook + +// SQLLogger represents an interface to log SQL +type SQLLogger interface { + BeforeSQL(context LogContext) // only invoked when IsShowSQL is true + AfterSQL(context LogContext) // only invoked when IsShowSQL is true +} + +// ContextLogger represents a logger interface with context +type ContextLogger interface { + SQLLogger + + Debugf(format string, v ...interface{}) + Errorf(format string, v ...interface{}) + Infof(format string, v ...interface{}) + Warnf(format string, v ...interface{}) + + Level() LogLevel + SetLevel(l LogLevel) + + ShowSQL(show ...bool) + IsShowSQL() bool +} + +var ( + _ ContextLogger = &LoggerAdapter{} +) + +// enumerate all the context keys +var ( + SessionIDKey = "__xorm_session_id" + SessionKey = "__xorm_session_key" + SessionShowSQLKey = "__xorm_show_sql" +) + +// LoggerAdapter wraps a Logger interface as LoggerContext interface +type LoggerAdapter struct { + logger Logger +} + +// NewLoggerAdapter creates an adapter for old xorm logger interface +func NewLoggerAdapter(logger Logger) ContextLogger { + return &LoggerAdapter{ + logger: logger, + } +} + +// BeforeSQL implements ContextLogger +func (l *LoggerAdapter) BeforeSQL(ctx LogContext) {} + +// AfterSQL implements ContextLogger +func (l *LoggerAdapter) AfterSQL(ctx LogContext) { + var sessionPart string + v := ctx.Ctx.Value(SessionIDKey) + if key, ok := v.(string); ok { + sessionPart = fmt.Sprintf(" [%s]", key) + } + if ctx.ExecuteTime > 0 { + l.logger.Infof("[SQL]%s %s %v - %v", sessionPart, ctx.SQL, ctx.Args, ctx.ExecuteTime) + } else { + l.logger.Infof("[SQL]%s %s %v", sessionPart, ctx.SQL, ctx.Args) + } +} + +// Debugf implements ContextLogger +func (l *LoggerAdapter) Debugf(format string, v 
...interface{}) {
+	l.logger.Debugf(format, v...)
+}
+
+// Errorf implements ContextLogger
+func (l *LoggerAdapter) Errorf(format string, v ...interface{}) {
+	l.logger.Errorf(format, v...)
+}
+
+// Infof implements ContextLogger
+func (l *LoggerAdapter) Infof(format string, v ...interface{}) {
+	l.logger.Infof(format, v...)
+}
+
+// Warnf implements ContextLogger
+func (l *LoggerAdapter) Warnf(format string, v ...interface{}) {
+	l.logger.Warnf(format, v...)
+}
+
+// Level implements ContextLogger
+func (l *LoggerAdapter) Level() LogLevel {
+	return l.logger.Level()
+}
+
+// SetLevel implements ContextLogger
+func (l *LoggerAdapter) SetLevel(lv LogLevel) {
+	l.logger.SetLevel(lv)
+}
+
+// ShowSQL implements ContextLogger
+func (l *LoggerAdapter) ShowSQL(show ...bool) {
+	l.logger.ShowSQL(show...)
+}
+
+// IsShowSQL implements ContextLogger
+func (l *LoggerAdapter) IsShowSQL() bool {
+	return l.logger.IsShowSQL()
+}
diff --git a/vendor/xorm.io/xorm/log/syslogger.go b/vendor/xorm.io/xorm/log/syslogger.go
new file mode 100644
index 0000000..0b3e381
--- /dev/null
+++ b/vendor/xorm.io/xorm/log/syslogger.go
@@ -0,0 +1,87 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !windows,!nacl,!plan9
+
+package log
+
+import (
+	"fmt"
+	"log/syslog"
+)
+
+var _ Logger = &SyslogLogger{}
+
+// SyslogLogger will be deprecated
+type SyslogLogger struct {
+	w       *syslog.Writer
+	showSQL bool
+}
+
+// NewSyslogLogger creates a SyslogLogger
+func NewSyslogLogger(w *syslog.Writer) *SyslogLogger {
+	return &SyslogLogger{w: w}
+}
+
+// Debug logs content at Debug level
+func (s *SyslogLogger) Debug(v ...interface{}) {
+	s.w.Debug(fmt.Sprint(v...))
+}
+
+// Debugf logs formatted content at Debug level
+func (s *SyslogLogger) Debugf(format string, v ...interface{}) {
+	s.w.Debug(fmt.Sprintf(format, v...))
+}
+
+// Error logs content at Error level
+func (s *SyslogLogger) Error(v ...interface{}) {
+	s.w.Err(fmt.Sprint(v...))
+}
+
+// Errorf logs formatted content at Error level
+func (s *SyslogLogger) Errorf(format string, v ...interface{}) {
+	s.w.Err(fmt.Sprintf(format, v...))
+}
+
+// Info logs content at Info level
+func (s *SyslogLogger) Info(v ...interface{}) {
+	s.w.Info(fmt.Sprint(v...))
+}
+
+// Infof logs formatted content at Info level
+func (s *SyslogLogger) Infof(format string, v ...interface{}) {
+	s.w.Info(fmt.Sprintf(format, v...))
+}
+
+// Warn logs content at Warning level
+func (s *SyslogLogger) Warn(v ...interface{}) {
+	s.w.Warning(fmt.Sprint(v...))
+}
+
+// Warnf logs formatted content at Warning level
+func (s *SyslogLogger) Warnf(format string, v ...interface{}) {
+	s.w.Warning(fmt.Sprintf(format, v...))
+}
+
+// Level shows the log level
+func (s *SyslogLogger) Level() LogLevel {
+	return LOG_UNKNOWN
+}
+
+// SetLevel is a no-op: the current log/syslog package doesn't allow changing the priority level after the syslog.Writer is created
+func (s *SyslogLogger) SetLevel(l LogLevel) {}
+
+// ShowSQL sets whether SQL is logged
+func (s *SyslogLogger) ShowSQL(show ...bool) {
+	if len(show) == 0 {
+		s.showSQL = true
+		return
+	}
+	s.showSQL = show[0]
+}
+
+// IsShowSQL reports whether SQL is logged
+func (s *SyslogLogger) IsShowSQL() bool {
+	return s.showSQL
+}
diff --git a/vendor/xorm.io/xorm/names/mapper.go b/vendor/xorm.io/xorm/names/mapper.go
new file mode 100644
index 0000000..79add76
--- /dev/null
+++ b/vendor/xorm.io/xorm/names/mapper.go
@@ -0,0 +1,265 @@
+// Copyright 2019 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package names
+
+import (
+	"strings"
+	"sync"
+	"unsafe"
+)
+
+// Mapper represents a name conversion between a struct's field names and a table's column names
+type Mapper interface {
+	Obj2Table(string) string
+	Table2Obj(string) string
+}
+
+// CacheMapper caches the results of another mapper
+type CacheMapper struct {
+	oriMapper      Mapper
+	obj2tableCache map[string]string
+	obj2tableMutex sync.RWMutex
+	table2objCache map[string]string
+	table2objMutex sync.RWMutex
+}
+
+// NewCacheMapper creates a caching wrapper around mapper
+func NewCacheMapper(mapper Mapper) *CacheMapper {
+	return &CacheMapper{oriMapper: mapper, obj2tableCache: make(map[string]string),
+		table2objCache: make(map[string]string),
+	}
+}
+
+func (m *CacheMapper) Obj2Table(o string) string {
+	m.obj2tableMutex.RLock()
+	t, ok := m.obj2tableCache[o]
+	m.obj2tableMutex.RUnlock()
+	if ok {
+		return t
+	}
+
+	t = m.oriMapper.Obj2Table(o)
+	m.obj2tableMutex.Lock()
+	m.obj2tableCache[o] = t
+	m.obj2tableMutex.Unlock()
+	return t
+}
+
+func (m *CacheMapper) Table2Obj(t string) string {
+	m.table2objMutex.RLock()
+	o, ok := m.table2objCache[t]
+	m.table2objMutex.RUnlock()
+	if ok {
+		return o
+	}
+
+	o = m.oriMapper.Table2Obj(t)
+	m.table2objMutex.Lock()
+	m.table2objCache[t] = o
+	m.table2objMutex.Unlock()
+	return o
+}
+
+// SameMapper implements Mapper and provides the same name between struct and
+// database table
+type SameMapper struct {
+}
+
+func (m SameMapper) Obj2Table(o string) string {
+	return o
+}
+
+func (m SameMapper) Table2Obj(t string) string {
+	return t
+}
+
+// SnakeMapper implements Mapper and provides name translation between
+// struct and database table
+type SnakeMapper struct {
+}
+
+func b2s(b []byte) string {
+	return *(*string)(unsafe.Pointer(&b))
+}
+
+func snakeCasedName(name string) string {
+	newstr := make([]byte, 0, len(name)+1)
+	for i := 0; i < len(name); i++ {
+		c := name[i]
+		if isUpper := 'A' <= c && c <= 'Z'; isUpper {
+			if i > 0 {
+				newstr = append(newstr, '_')
+			}
+			c += 'a' - 'A'
+		}
+		newstr = append(newstr, c)
+	}
+
+	return b2s(newstr)
+}
+
+func (mapper SnakeMapper) Obj2Table(name string) string {
+	return snakeCasedName(name)
+}
+
+func titleCasedName(name string) string {
+	newstr := make([]byte, 0, len(name))
+	upNextChar := true
+
+	name = strings.ToLower(name)
+
+	for i := 0; i < len(name); i++ {
+		c := name[i]
+		switch {
+		case upNextChar:
+			upNextChar = false
+			if 'a' <= c && c <= 'z' {
+				c -= 'a' - 'A'
+			}
+		case c == '_':
+			upNextChar = true
+			continue
+		}
+
+		newstr = append(newstr, c)
+	}
+
+	return b2s(newstr)
+}
+
+func (mapper SnakeMapper) Table2Obj(name string) string {
+	return titleCasedName(name)
+}
+
+// GonicMapper implements Mapper. It will consider initialisms when mapping names.
+// E.g.
id -> ID, user -> User and to table names: UserID -> user_id, MyUID -> my_uid +type GonicMapper map[string]bool + +func isASCIIUpper(r rune) bool { + return 'A' <= r && r <= 'Z' +} + +func toASCIIUpper(r rune) rune { + if 'a' <= r && r <= 'z' { + r -= ('a' - 'A') + } + return r +} + +func gonicCasedName(name string) string { + newstr := make([]rune, 0, len(name)+3) + for idx, chr := range name { + if isASCIIUpper(chr) && idx > 0 { + if !isASCIIUpper(newstr[len(newstr)-1]) { + newstr = append(newstr, '_') + } + } + + if !isASCIIUpper(chr) && idx > 1 { + l := len(newstr) + if isASCIIUpper(newstr[l-1]) && isASCIIUpper(newstr[l-2]) { + newstr = append(newstr, newstr[l-1]) + newstr[l-1] = '_' + } + } + + newstr = append(newstr, chr) + } + return strings.ToLower(string(newstr)) +} + +func (mapper GonicMapper) Obj2Table(name string) string { + return gonicCasedName(name) +} + +func (mapper GonicMapper) Table2Obj(name string) string { + newstr := make([]rune, 0) + + name = strings.ToLower(name) + parts := strings.Split(name, "_") + + for _, p := range parts { + _, isInitialism := mapper[strings.ToUpper(p)] + for i, r := range p { + if i == 0 || isInitialism { + r = toASCIIUpper(r) + } + newstr = append(newstr, r) + } + } + + return string(newstr) +} + +// LintGonicMapper is A GonicMapper that contains a list of common initialisms taken from golang/lint +var LintGonicMapper = GonicMapper{ + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTP": true, + "HTTPS": true, + "ID": true, + "IP": true, + "JSON": true, + "LHS": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SSH": true, + "TLS": true, + "TTL": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XSRF": true, + "XSS": true, +} + +// PrefixMapper provides prefix table name support +type PrefixMapper struct { + Mapper Mapper + Prefix string +} + +func (mapper PrefixMapper) Obj2Table(name string) string { + return mapper.Prefix + mapper.Mapper.Obj2Table(name) +} + +func (mapper PrefixMapper) Table2Obj(name string) string { + return mapper.Mapper.Table2Obj(name[len(mapper.Prefix):]) +} + +func NewPrefixMapper(mapper Mapper, prefix string) PrefixMapper { + return PrefixMapper{mapper, prefix} +} + +// SuffixMapper provides suffix table name support +type SuffixMapper struct { + Mapper Mapper + Suffix string +} + +func (mapper SuffixMapper) Obj2Table(name string) string { + return mapper.Mapper.Obj2Table(name) + mapper.Suffix +} + +func (mapper SuffixMapper) Table2Obj(name string) string { + return mapper.Mapper.Table2Obj(name[:len(name)-len(mapper.Suffix)]) +} + +func NewSuffixMapper(mapper Mapper, suffix string) SuffixMapper { + return SuffixMapper{mapper, suffix} +} diff --git a/vendor/xorm.io/xorm/names/table_name.go b/vendor/xorm.io/xorm/names/table_name.go new file mode 100644 index 0000000..0afb1ae --- /dev/null +++ b/vendor/xorm.io/xorm/names/table_name.go @@ -0,0 +1,56 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
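+// For illustration only, a sketch of how the mappers defined in mapper.go
+// above behave (the struct and table names here are invented for the example):
+//
+//	names.SnakeMapper{}.Obj2Table("UserInfo")                          // "user_info"
+//	names.SnakeMapper{}.Table2Obj("user_info")                         // "UserInfo"
+//	names.GonicMapper{"ID": true}.Obj2Table("UserID")                  // "user_id"
+//	names.NewPrefixMapper(names.SnakeMapper{}, "t_").Obj2Table("User") // "t_user"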
+
+package names
+
+import (
+	"reflect"
+	"sync"
+)
+
+// TableName is an interface to define a customized table name
+type TableName interface {
+	TableName() string
+}
+
+var (
+	tpTableName = reflect.TypeOf((*TableName)(nil)).Elem()
+	tvCache     sync.Map
+)
+
+// GetTableName returns the table name for v, honouring the TableName interface
+func GetTableName(mapper Mapper, v reflect.Value) string {
+	if v.Type().Implements(tpTableName) {
+		return v.Interface().(TableName).TableName()
+	}
+
+	if v.Kind() == reflect.Ptr {
+		v = v.Elem()
+		if v.Type().Implements(tpTableName) {
+			return v.Interface().(TableName).TableName()
+		}
+	} else if v.CanAddr() {
+		v1 := v.Addr()
+		if v1.Type().Implements(tpTableName) {
+			return v1.Interface().(TableName).TableName()
+		}
+	} else {
+		name, ok := tvCache.Load(v.Type())
+		if ok {
+			if name.(string) != "" {
+				return name.(string)
+			}
+		} else {
+			v2 := reflect.New(v.Type())
+			if v2.Type().Implements(tpTableName) {
+				tableName := v2.Interface().(TableName).TableName()
+				tvCache.Store(v.Type(), tableName)
+				return tableName
+			}
+
+			tvCache.Store(v.Type(), "")
+		}
+	}
+
+	return mapper.Obj2Table(v.Type().Name())
+}
diff --git a/vendor/xorm.io/xorm/processors.go b/vendor/xorm.io/xorm/processors.go
new file mode 100644
index 0000000..8697e30
--- /dev/null
+++ b/vendor/xorm.io/xorm/processors.go
@@ -0,0 +1,144 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+// BeforeInsertProcessor executed before an object is initially persisted to the database
+type BeforeInsertProcessor interface {
+	BeforeInsert()
+}
+
+// BeforeUpdateProcessor executed before an object is updated
+type BeforeUpdateProcessor interface {
+	BeforeUpdate()
+}
+
+// BeforeDeleteProcessor executed before an object is deleted
+type BeforeDeleteProcessor interface {
+	BeforeDelete()
+}
+
+// BeforeSetProcessor executed before data is set to the struct fields
+type BeforeSetProcessor interface {
+	BeforeSet(string, Cell)
+}
+
+// AfterSetProcessor executed after data is set to the struct fields
+type AfterSetProcessor interface {
+	AfterSet(string, Cell)
+}
+
+// AfterInsertProcessor executed after an object is persisted to the database
+type AfterInsertProcessor interface {
+	AfterInsert()
+}
+
+// AfterUpdateProcessor executed after an object has been updated
+type AfterUpdateProcessor interface {
+	AfterUpdate()
+}
+
+// AfterDeleteProcessor executed after an object has been deleted
+type AfterDeleteProcessor interface {
+	AfterDelete()
+}
+
+// AfterLoadProcessor executed after an object has been loaded from the database
+type AfterLoadProcessor interface {
+	AfterLoad()
+}
+
+// AfterLoadSessionProcessor executed after an object has been loaded from the database, with a session parameter
+type AfterLoadSessionProcessor interface {
+	AfterLoad(*Session)
+}
+
+type executedProcessorFunc func(*Session, interface{}) error
+
+type executedProcessor struct {
+	fun     executedProcessorFunc
+	session *Session
+	bean    interface{}
+}
+
+func (executor *executedProcessor) execute() error {
+	return executor.fun(executor.session, executor.bean)
+}
+
+func (session *Session) executeProcessors() error {
+	processors := session.afterProcessors
+	session.afterProcessors = make([]executedProcessor, 0)
+	for _, processor := range processors {
+		if err := processor.execute(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func cleanupProcessorsClosures(slices *[]func(interface{})) {
+	if len(*slices) > 0 {
+		*slices = make([]func(interface{}), 0)
+	}
+}
+
+func
executeBeforeClosures(session *Session, bean interface{}) { + // handle before delete processors + for _, closure := range session.beforeClosures { + closure(bean) + } + cleanupProcessorsClosures(&session.beforeClosures) +} + +func executeBeforeSet(bean interface{}, fields []string, scanResults []interface{}) { + if b, hasBeforeSet := bean.(BeforeSetProcessor); hasBeforeSet { + for ii, key := range fields { + b.BeforeSet(key, Cell(scanResults[ii].(*interface{}))) + } + } +} + +func executeAfterSet(bean interface{}, fields []string, scanResults []interface{}) { + if b, hasAfterSet := bean.(AfterSetProcessor); hasAfterSet { + for ii, key := range fields { + b.AfterSet(key, Cell(scanResults[ii].(*interface{}))) + } + } +} + +func buildAfterProcessors(session *Session, bean interface{}) { + // handle afterClosures + for _, closure := range session.afterClosures { + session.afterProcessors = append(session.afterProcessors, executedProcessor{ + fun: func(sess *Session, bean interface{}) error { + closure(bean) + return nil + }, + session: session, + bean: bean, + }) + } + + if a, has := bean.(AfterLoadProcessor); has { + session.afterProcessors = append(session.afterProcessors, executedProcessor{ + fun: func(sess *Session, bean interface{}) error { + a.AfterLoad() + return nil + }, + session: session, + bean: bean, + }) + } + + if a, has := bean.(AfterLoadSessionProcessor); has { + session.afterProcessors = append(session.afterProcessors, executedProcessor{ + fun: func(sess *Session, bean interface{}) error { + a.AfterLoad(sess) + return nil + }, + session: session, + bean: bean, + }) + } +} diff --git a/vendor/xorm.io/xorm/rows.go b/vendor/xorm.io/xorm/rows.go new file mode 100644 index 0000000..a56ea1c --- /dev/null +++ b/vendor/xorm.io/xorm/rows.go @@ -0,0 +1,158 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "database/sql" + "errors" + "fmt" + "reflect" + + "xorm.io/builder" + "xorm.io/xorm/core" + "xorm.io/xorm/internal/utils" +) + +// Rows rows wrapper a rows to +type Rows struct { + session *Session + rows *core.Rows + beanType reflect.Type + lastError error +} + +func newRows(session *Session, bean interface{}) (*Rows, error) { + rows := new(Rows) + rows.session = session + rows.beanType = reflect.Indirect(reflect.ValueOf(bean)).Type() + + var sqlStr string + var args []interface{} + var err error + + beanValue := reflect.ValueOf(bean) + if beanValue.Kind() != reflect.Ptr { + return nil, errors.New("needs a pointer to a value") + } else if beanValue.Elem().Kind() == reflect.Ptr { + return nil, errors.New("a pointer to a pointer is not allowed") + } + + if err = rows.session.statement.SetRefBean(bean); err != nil { + return nil, err + } + + if len(session.statement.TableName()) <= 0 { + return nil, ErrTableNotFound + } + + if rows.session.statement.RawSQL == "" { + var autoCond builder.Cond + var addedTableName = (len(session.statement.JoinStr) > 0) + var table = rows.session.statement.RefTable + + if !session.statement.NoAutoCondition { + var err error + autoCond, err = session.statement.BuildConds(table, bean, true, true, false, true, addedTableName) + if err != nil { + return nil, err + } + } else { + // !oinume! Add " IS NULL" to WHERE whatever condiBean is given. 
+ // See https://gitea.com/xorm/xorm/issues/179 + if col := table.DeletedColumn(); col != nil && !session.statement.GetUnscoped() { // tag "deleted" is enabled + var colName = session.engine.Quote(col.Name) + if addedTableName { + var nm = session.statement.TableName() + if len(session.statement.TableAlias) > 0 { + nm = session.statement.TableAlias + } + colName = session.engine.Quote(nm) + "." + colName + } + + autoCond = session.statement.CondDeleted(col) + } + } + + sqlStr, args, err = rows.session.statement.GenFindSQL(autoCond) + if err != nil { + return nil, err + } + } else { + sqlStr = rows.session.statement.GenRawSQL() + args = rows.session.statement.RawParams + } + + rows.rows, err = rows.session.queryRows(sqlStr, args...) + if err != nil { + rows.lastError = err + rows.Close() + return nil, err + } + + return rows, nil +} + +// Next move cursor to next record, return false if end has reached +func (rows *Rows) Next() bool { + if rows.lastError == nil && rows.rows != nil { + hasNext := rows.rows.Next() + if !hasNext { + rows.lastError = sql.ErrNoRows + } + return hasNext + } + return false +} + +// Err returns the error, if any, that was encountered during iteration. Err may be called after an explicit or implicit Close. +func (rows *Rows) Err() error { + return rows.lastError +} + +// Scan row record to bean properties +func (rows *Rows) Scan(bean interface{}) error { + if rows.lastError != nil { + return rows.lastError + } + + if reflect.Indirect(reflect.ValueOf(bean)).Type() != rows.beanType { + return fmt.Errorf("scan arg is incompatible type to [%v]", rows.beanType) + } + + if err := rows.session.statement.SetRefBean(bean); err != nil { + return err + } + + fields, err := rows.rows.Columns() + if err != nil { + return err + } + + scanResults, err := rows.session.row2Slice(rows.rows, fields, bean) + if err != nil { + return err + } + + dataStruct := utils.ReflectValue(bean) + _, err = rows.session.slice2Bean(scanResults, fields, bean, &dataStruct, rows.session.statement.RefTable) + if err != nil { + return err + } + + return rows.session.executeProcessors() +} + +// Close session if session.IsAutoClose is true, and claimed any opened resources +func (rows *Rows) Close() error { + if rows.session.isAutoClose { + defer rows.session.Close() + } + + if rows.rows != nil { + return rows.rows.Close() + } + + return rows.lastError +} diff --git a/vendor/xorm.io/xorm/schemas/column.go b/vendor/xorm.io/xorm/schemas/column.go new file mode 100644 index 0000000..db66a3a --- /dev/null +++ b/vendor/xorm.io/xorm/schemas/column.go @@ -0,0 +1,133 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
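+// For illustration only, a sketch of how the Rows iterator in rows.go above is
+// typically consumed (engine.Rows and the User bean are assumed here and are
+// not part of this file):
+//
+//	rows, err := engine.Rows(&User{})
+//	if err != nil {
+//		return err
+//	}
+//	defer rows.Close()
+//	var user User
+//	for rows.Next() {
+//		if err := rows.Scan(&user); err != nil {
+//			return err
+//		}
+//		// use user ...
+//	}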
+
+package schemas
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	TWOSIDES = iota + 1
+	ONLYTODB
+	ONLYFROMDB
+)
+
+// Column defines a database column
+type Column struct {
+	Name            string
+	TableName       string
+	FieldName       string // Available only when parsed from a struct
+	SQLType         SQLType
+	IsJSON          bool
+	Length          int
+	Length2         int
+	Nullable        bool
+	Default         string
+	Indexes         map[string]int
+	IsPrimaryKey    bool
+	IsAutoIncrement bool
+	MapType         int
+	IsCreated       bool
+	IsUpdated       bool
+	IsDeleted       bool
+	IsCascade       bool
+	IsVersion       bool
+	DefaultIsEmpty  bool // true means no default has been set; false means a default exists (its value may still be empty)
+	EnumOptions     map[string]int
+	SetOptions      map[string]int
+	DisableTimeZone bool
+	TimeZone        *time.Location // column-specified time zone
+	Comment         string
+}
+
+// NewColumn creates a new column
+func NewColumn(name, fieldName string, sqlType SQLType, len1, len2 int, nullable bool) *Column {
+	return &Column{
+		Name:            name,
+		TableName:       "",
+		FieldName:       fieldName,
+		SQLType:         sqlType,
+		Length:          len1,
+		Length2:         len2,
+		Nullable:        nullable,
+		Default:         "",
+		Indexes:         make(map[string]int),
+		IsPrimaryKey:    false,
+		IsAutoIncrement: false,
+		MapType:         TWOSIDES,
+		IsCreated:       false,
+		IsUpdated:       false,
+		IsDeleted:       false,
+		IsCascade:       false,
+		IsVersion:       false,
+		DefaultIsEmpty:  true, // a new column starts with no default
+		EnumOptions:     make(map[string]int),
+		Comment:         "",
+	}
+}
+
+// ValueOf returns the column's field value from a struct's value
+func (col *Column) ValueOf(bean interface{}) (*reflect.Value, error) {
+	dataStruct := reflect.Indirect(reflect.ValueOf(bean))
+	return col.ValueOfV(&dataStruct)
+}
+
+// ValueOfV returns the column's field value, accepting a reflect.Value of the struct
+func (col *Column) ValueOfV(dataStruct *reflect.Value) (*reflect.Value, error) {
+	var fieldValue reflect.Value
+	fieldPath := strings.Split(col.FieldName, ".")
+
+	if dataStruct.Type().Kind() == reflect.Map {
+		keyValue := reflect.ValueOf(fieldPath[len(fieldPath)-1])
+		fieldValue = dataStruct.MapIndex(keyValue)
+		return &fieldValue, nil
+	} else if dataStruct.Type().Kind() == reflect.Interface {
+		structValue := reflect.ValueOf(dataStruct.Interface())
+		dataStruct = &structValue
+	}
+
+	level := len(fieldPath)
+	fieldValue = dataStruct.FieldByName(fieldPath[0])
+	for i := 0; i < level-1; i++ {
+		if !fieldValue.IsValid() {
+			break
+		}
+		if fieldValue.Kind() == reflect.Struct {
+			fieldValue = fieldValue.FieldByName(fieldPath[i+1])
+		} else if fieldValue.Kind() == reflect.Ptr {
+			if fieldValue.IsNil() {
+				fieldValue.Set(reflect.New(fieldValue.Type().Elem()))
+			}
+			fieldValue = fieldValue.Elem().FieldByName(fieldPath[i+1])
+		} else {
+			return nil, fmt.Errorf("field %v is not valid", col.FieldName)
+		}
+	}
+
+	if !fieldValue.IsValid() {
+		return nil, fmt.Errorf("field %v is not valid", col.FieldName)
+	}
+
+	return &fieldValue, nil
+}
+
+// ConvertID converts id content to a suitable type according to the column type
+func (col *Column) ConvertID(sid string) (interface{}, error) {
+	if col.SQLType.IsNumeric() {
+		n, err := strconv.ParseInt(sid, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		return n, nil
+	} else if col.SQLType.IsText() {
+		return sid, nil
+	}
+	return nil, errors.New("not supported")
+}
diff --git a/vendor/xorm.io/xorm/schemas/index.go b/vendor/xorm.io/xorm/schemas/index.go
new file mode 100644
index 0000000..9541250
--- /dev/null
+++ b/vendor/xorm.io/xorm/schemas/index.go
@@ -0,0 +1,72 @@
+// Copyright 2019 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package schemas + +import ( + "fmt" + "strings" +) + +// enumerate all index types +const ( + IndexType = iota + 1 + UniqueType +) + +// Index represents a database index +type Index struct { + IsRegular bool + Name string + Type int + Cols []string +} + +// NewIndex new an index object +func NewIndex(name string, indexType int) *Index { + return &Index{true, name, indexType, make([]string, 0)} +} + +func (index *Index) XName(tableName string) string { + if !strings.HasPrefix(index.Name, "UQE_") && + !strings.HasPrefix(index.Name, "IDX_") { + tableParts := strings.Split(strings.Replace(tableName, `"`, "", -1), ".") + tableName = tableParts[len(tableParts)-1] + if index.Type == UniqueType { + return fmt.Sprintf("UQE_%v_%v", tableName, index.Name) + } + return fmt.Sprintf("IDX_%v_%v", tableName, index.Name) + } + return index.Name +} + +// AddColumn add columns which will be composite index +func (index *Index) AddColumn(cols ...string) { + for _, col := range cols { + index.Cols = append(index.Cols, col) + } +} + +func (index *Index) Equal(dst *Index) bool { + if index.Type != dst.Type { + return false + } + if len(index.Cols) != len(dst.Cols) { + return false + } + + for i := 0; i < len(index.Cols); i++ { + var found bool + for j := 0; j < len(dst.Cols); j++ { + if index.Cols[i] == dst.Cols[j] { + found = true + break + } + } + if !found { + return false + } + } + return true +} diff --git a/vendor/xorm.io/xorm/schemas/pk.go b/vendor/xorm.io/xorm/schemas/pk.go new file mode 100644 index 0000000..03916b4 --- /dev/null +++ b/vendor/xorm.io/xorm/schemas/pk.go @@ -0,0 +1,41 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package schemas + +import ( + "bytes" + "encoding/gob" + + "xorm.io/xorm/internal/utils" +) + +type PK []interface{} + +func NewPK(pks ...interface{}) *PK { + p := PK(pks) + return &p +} + +func (p *PK) IsZero() bool { + for _, k := range *p { + if utils.IsZero(k) { + return true + } + } + return false +} + +func (p *PK) ToString() (string, error) { + buf := new(bytes.Buffer) + enc := gob.NewEncoder(buf) + err := enc.Encode(*p) + return buf.String(), err +} + +func (p *PK) FromString(content string) error { + dec := gob.NewDecoder(bytes.NewBufferString(content)) + err := dec.Decode(p) + return err +} diff --git a/vendor/xorm.io/xorm/schemas/quote.go b/vendor/xorm.io/xorm/schemas/quote.go new file mode 100644 index 0000000..c44abe2 --- /dev/null +++ b/vendor/xorm.io/xorm/schemas/quote.go @@ -0,0 +1,240 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
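+// For illustration only, assuming the CommonQuoter defined below (backticks on
+// both sides, every word treated as reserved):
+//
+//	schemas.CommonQuoter.Quote("user.name")    // "`user`.`name`"
+//	schemas.CommonQuoter.Trim("`user`.`name`") // "user.name"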
+
+package schemas
+
+import (
+	"strings"
+)
+
+// Quoter represents a quoter for SQL table names and column names
+type Quoter struct {
+	Prefix     byte
+	Suffix     byte
+	IsReserved func(string) bool
+}
+
+var (
+	// AlwaysNoReserve always treats the word as not reserved
+	AlwaysNoReserve = func(string) bool { return false }
+
+	// AlwaysReserve always treats the word as reserved
+	AlwaysReserve = func(string) bool { return true }
+
+	// CommanQuoteMark represents the common quote mark
+	CommanQuoteMark byte = '`'
+
+	// CommonQuoter represents a common quoter
+	CommonQuoter = Quoter{CommanQuoteMark, CommanQuoteMark, AlwaysReserve}
+)
+
+// IsEmpty returns true if no quote marks are set
+func (q Quoter) IsEmpty() bool {
+	return q.Prefix == 0 && q.Suffix == 0
+}
+
+// Quote quotes a table or column name
+func (q Quoter) Quote(s string) string {
+	var buf strings.Builder
+	q.QuoteTo(&buf, s)
+	return buf.String()
+}
+
+// Trim removes quotes from s
+func (q Quoter) Trim(s string) string {
+	if len(s) < 2 {
+		return s
+	}
+
+	var buf strings.Builder
+	for i := 0; i < len(s); i++ {
+		switch {
+		case i == 0 && s[i] == q.Prefix:
+		case i == len(s)-1 && s[i] == q.Suffix:
+		case s[i] == q.Suffix && s[i+1] == '.':
+		case s[i] == q.Prefix && s[i-1] == '.':
+		default:
+			buf.WriteByte(s[i])
+		}
+	}
+	return buf.String()
+}
+
+// Join joins the elements quoted, analogous to strings.Join
+func (q Quoter) Join(a []string, sep string) string {
+	var b strings.Builder
+	q.JoinWrite(&b, a, sep)
+	return b.String()
+}
+
+// JoinWrite is the writer form of Join
+func (q Quoter) JoinWrite(b *strings.Builder, a []string, sep string) error {
+	if len(a) == 0 {
+		return nil
+	}
+
+	n := len(sep) * (len(a) - 1)
+	for i := 0; i < len(a); i++ {
+		n += len(a[i])
+	}
+
+	b.Grow(n)
+	for i, s := range a {
+		if i > 0 {
+			if _, err := b.WriteString(sep); err != nil {
+				return err
+			}
+		}
+		if s != "*" {
+			q.QuoteTo(b, strings.TrimSpace(s))
+		}
+	}
+	return nil
+}
+
+func findWord(v string, start int) int {
+	for j := start; j < len(v); j++ {
+		switch v[j] {
+		case '.', ' ':
+			return j
+		}
+	}
+	return len(v)
+}
+
+func findStart(value string, start int) int {
+	if value[start] == '.' {
+		return start + 1
+	}
+	if value[start] != ' ' {
+		return start
+	}
+
+	var k = -1
+	for j := start; j < len(value); j++ {
+		if value[j] != ' ' {
+			k = j
+			break
+		}
+	}
+	if k == -1 {
+		return len(value)
+	}
+
+	if (value[k] == 'A' || value[k] == 'a') && (value[k+1] == 'S' || value[k+1] == 's') {
+		k = k + 2
+	}
+
+	for j := k; j < len(value); j++ {
+		if value[j] != ' ' {
+			return j
+		}
+	}
+	return len(value)
+}
+
+func (q Quoter) quoteWordTo(buf *strings.Builder, word string) error {
+	var realWord = word
+	if (word[0] == CommanQuoteMark && word[len(word)-1] == CommanQuoteMark) ||
+		(word[0] == q.Prefix && word[len(word)-1] == q.Suffix) {
+		realWord = word[1 : len(word)-1]
+	}
+
+	if q.IsEmpty() {
+		_, err := buf.WriteString(realWord)
+		return err
+	}
+
+	isReserved := q.IsReserved(realWord)
+	if isReserved {
+		if err := buf.WriteByte(q.Prefix); err != nil {
+			return err
+		}
+	}
+	if _, err := buf.WriteString(realWord); err != nil {
+		return err
+	}
+	if isReserved {
+		return buf.WriteByte(q.Suffix)
+	}
+
+	return nil
+}
+
+// QuoteTo quotes the table or column names. i.e.
if the quotes are [ and ] +// name -> [name] +// `name` -> [name] +// [name] -> [name] +// schema.name -> [schema].[name] +// `schema`.`name` -> [schema].[name] +// `schema`.name -> [schema].[name] +// schema.`name` -> [schema].[name] +// [schema].name -> [schema].[name] +// schema.[name] -> [schema].[name] +// name AS a -> [name] AS a +// schema.name AS a -> [schema].[name] AS a +func (q Quoter) QuoteTo(buf *strings.Builder, value string) error { + var i int + for i < len(value) { + start := findStart(value, i) + if start > i { + if _, err := buf.WriteString(value[i:start]); err != nil { + return err + } + } + if start == len(value) { + return nil + } + + var nextEnd = findWord(value, start) + if err := q.quoteWordTo(buf, value[start:nextEnd]); err != nil { + return err + } + i = nextEnd + } + return nil +} + +// Strings quotes a slice of string +func (q Quoter) Strings(s []string) []string { + var res = make([]string, 0, len(s)) + for _, a := range s { + res = append(res, q.Quote(a)) + } + return res +} + +// Replace replaces common quote(`) as the quotes on the sql +func (q Quoter) Replace(sql string) string { + if q.IsEmpty() { + return sql + } + + var buf strings.Builder + buf.Grow(len(sql)) + + var beginSingleQuote bool + for i := 0; i < len(sql); i++ { + if !beginSingleQuote && sql[i] == CommanQuoteMark { + var j = i + 1 + for ; j < len(sql); j++ { + if sql[j] == CommanQuoteMark { + break + } + } + word := sql[i+1 : j] + isReserved := q.IsReserved(word) + if isReserved { + buf.WriteByte(q.Prefix) + } + buf.WriteString(word) + if isReserved { + buf.WriteByte(q.Suffix) + } + i = j + } else { + if sql[i] == '\'' { + beginSingleQuote = !beginSingleQuote + } + buf.WriteByte(sql[i]) + } + } + return buf.String() +} diff --git a/vendor/xorm.io/xorm/schemas/table.go b/vendor/xorm.io/xorm/schemas/table.go new file mode 100644 index 0000000..6c57a7e --- /dev/null +++ b/vendor/xorm.io/xorm/schemas/table.go @@ -0,0 +1,195 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
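+// For illustration only, a sketch of assembling a Table by hand; in practice
+// the tag parser builds these from struct tags:
+//
+//	t := schemas.NewEmptyTable()
+//	t.Name = "user"
+//	col := schemas.NewColumn("id", "Id", schemas.SQLType{Name: schemas.BigInt}, 0, 0, false)
+//	col.IsPrimaryKey = true
+//	t.AddColumn(col)
+//	// t.PrimaryKeys is now []string{"id"}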
+
+package schemas
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// Table represents a database table
+type Table struct {
+	Name          string
+	Type          reflect.Type
+	columnsSeq    []string
+	columnsMap    map[string][]*Column
+	columns       []*Column
+	Indexes       map[string]*Index
+	PrimaryKeys   []string
+	AutoIncrement string
+	Created       map[string]bool
+	Updated       string
+	Deleted       string
+	Version       string
+	StoreEngine   string
+	Charset       string
+	Comment       string
+}
+
+// NewEmptyTable creates an empty table
+func NewEmptyTable() *Table {
+	return NewTable("", nil)
+}
+
+// NewTable creates a new Table object
+func NewTable(name string, t reflect.Type) *Table {
+	return &Table{Name: name, Type: t,
+		columnsSeq:  make([]string, 0),
+		columns:     make([]*Column, 0),
+		columnsMap:  make(map[string][]*Column),
+		Indexes:     make(map[string]*Index),
+		Created:     make(map[string]bool),
+		PrimaryKeys: make([]string, 0),
+	}
+}
+
+// Columns returns table's columns
+func (table *Table) Columns() []*Column {
+	return table.columns
+}
+
+// ColumnsSeq returns the table's column names in their declared sequence
+func (table *Table) ColumnsSeq() []string {
+	return table.columnsSeq
+}
+
+func (table *Table) columnsByName(name string) []*Column {
+	for k, cols := range table.columnsMap {
+		if strings.EqualFold(k, name) {
+			return cols
+		}
+	}
+	return nil
+}
+
+// GetColumn returns the column by name; if the column is not found, returns nil
+func (table *Table) GetColumn(name string) *Column {
+	cols := table.columnsByName(name)
+	if cols != nil {
+		return cols[0]
+	}
+
+	return nil
+}
+
+// GetColumnIdx returns the column by name and index
+func (table *Table) GetColumnIdx(name string, idx int) *Column {
+	cols := table.columnsByName(name)
+	if cols != nil && idx < len(cols) {
+		return cols[idx]
+	}
+
+	return nil
+}
+
+// PKColumns returns all primary key columns
+func (table *Table) PKColumns() []*Column {
+	columns := make([]*Column, len(table.PrimaryKeys))
+	for i, name := range table.PrimaryKeys {
+		columns[i] = table.GetColumn(name)
+	}
+	return columns
+}
+
+// ColumnType returns the reflect.Type of the struct field with the given name
+func (table *Table) ColumnType(name string) reflect.Type {
+	t, _ := table.Type.FieldByName(name)
+	return t.Type
+}
+
+// AutoIncrColumn returns the auto-increment column
+func (table *Table) AutoIncrColumn() *Column {
+	return table.GetColumn(table.AutoIncrement)
+}
+
+// VersionColumn returns the version column
+func (table *Table) VersionColumn() *Column {
+	return table.GetColumn(table.Version)
+}
+
+// UpdatedColumn returns the updated-timestamp column
+func (table *Table) UpdatedColumn() *Column {
+	return table.GetColumn(table.Updated)
+}
+
+// DeletedColumn returns the soft-delete column
+func (table *Table) DeletedColumn() *Column {
+	return table.GetColumn(table.Deleted)
+}
+
+// AddColumn adds a column to the table
+func (table *Table) AddColumn(col *Column) {
+	table.columnsSeq = append(table.columnsSeq, col.Name)
+	table.columns = append(table.columns, col)
+	colName := strings.ToLower(col.Name)
+	if c, ok := table.columnsMap[colName]; ok {
+		table.columnsMap[colName] = append(c, col)
+	} else {
+		table.columnsMap[colName] = []*Column{col}
+	}
+
+	if col.IsPrimaryKey {
+		table.PrimaryKeys = append(table.PrimaryKeys, col.Name)
+	}
+	if col.IsAutoIncrement {
+		table.AutoIncrement = col.Name
+	}
+	if col.IsCreated {
+		table.Created[col.Name] = true
+	}
+	if col.IsUpdated {
+		table.Updated = col.Name
+	}
+	if col.IsDeleted {
+		table.Deleted = col.Name
+	}
+	if col.IsVersion {
+		table.Version = col.Name
+	}
+}
+
+// AddIndex adds an index or a unique constraint to the table
+func (table *Table) AddIndex(index *Index) {
+	table.Indexes[index.Name] = index
+}
+
+// IDOfV gets the id from one struct value
+func (table *Table) IDOfV(rv reflect.Value) (PK, error) {
+	v :=
reflect.Indirect(rv) + pk := make([]interface{}, len(table.PrimaryKeys)) + for i, col := range table.PKColumns() { + var err error + + fieldName := col.FieldName + for { + parts := strings.SplitN(fieldName, ".", 2) + if len(parts) == 1 { + break + } + + v = v.FieldByName(parts[0]) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if v.Kind() != reflect.Struct { + return nil, fmt.Errorf("Unsupported read value of column %s from field %s", col.Name, col.FieldName) + } + fieldName = parts[1] + } + + pkField := v.FieldByName(fieldName) + switch pkField.Kind() { + case reflect.String: + pk[i], err = col.ConvertID(pkField.String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + pk[i], err = col.ConvertID(strconv.FormatInt(pkField.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + // id of uint will be converted to int64 + pk[i], err = col.ConvertID(strconv.FormatUint(pkField.Uint(), 10)) + } + + if err != nil { + return nil, err + } + } + return PK(pk), nil +} diff --git a/vendor/xorm.io/xorm/schemas/type.go b/vendor/xorm.io/xorm/schemas/type.go new file mode 100644 index 0000000..89459a4 --- /dev/null +++ b/vendor/xorm.io/xorm/schemas/type.go @@ -0,0 +1,336 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package schemas + +import ( + "reflect" + "sort" + "strings" + "time" +) + +type DBType string + +const ( + POSTGRES DBType = "postgres" + SQLITE DBType = "sqlite3" + MYSQL DBType = "mysql" + MSSQL DBType = "mssql" + ORACLE DBType = "oracle" +) + +// SQLType represents SQL types +type SQLType struct { + Name string + DefaultLength int + DefaultLength2 int +} + +const ( + UNKNOW_TYPE = iota + TEXT_TYPE + BLOB_TYPE + TIME_TYPE + NUMERIC_TYPE + ARRAY_TYPE +) + +func (s *SQLType) IsType(st int) bool { + if t, ok := SqlTypes[s.Name]; ok && t == st { + return true + } + return false +} + +func (s *SQLType) IsText() bool { + return s.IsType(TEXT_TYPE) +} + +func (s *SQLType) IsBlob() bool { + return s.IsType(BLOB_TYPE) +} + +func (s *SQLType) IsTime() bool { + return s.IsType(TIME_TYPE) +} + +func (s *SQLType) IsNumeric() bool { + return s.IsType(NUMERIC_TYPE) +} + +func (s *SQLType) IsArray() bool { + return s.IsType(ARRAY_TYPE) +} + +func (s *SQLType) IsJson() bool { + return s.Name == Json || s.Name == Jsonb +} + +var ( + Bit = "BIT" + TinyInt = "TINYINT" + SmallInt = "SMALLINT" + MediumInt = "MEDIUMINT" + Int = "INT" + Integer = "INTEGER" + BigInt = "BIGINT" + + Enum = "ENUM" + Set = "SET" + + Char = "CHAR" + Varchar = "VARCHAR" + NChar = "NCHAR" + NVarchar = "NVARCHAR" + TinyText = "TINYTEXT" + Text = "TEXT" + NText = "NTEXT" + Clob = "CLOB" + MediumText = "MEDIUMTEXT" + LongText = "LONGTEXT" + Uuid = "UUID" + UniqueIdentifier = "UNIQUEIDENTIFIER" + SysName = "SYSNAME" + + Date = "DATE" + DateTime = "DATETIME" + SmallDateTime = "SMALLDATETIME" + Time = "TIME" + TimeStamp = "TIMESTAMP" + TimeStampz = "TIMESTAMPZ" + Year = "YEAR" + + Decimal = "DECIMAL" + Numeric = "NUMERIC" + Money = "MONEY" + SmallMoney = "SMALLMONEY" + + Real = "REAL" + Float = "FLOAT" + Double = "DOUBLE" + + Binary = "BINARY" + VarBinary = "VARBINARY" + TinyBlob = "TINYBLOB" + Blob = "BLOB" + MediumBlob = "MEDIUMBLOB" + LongBlob = "LONGBLOB" + Bytea = "BYTEA" + + Bool = "BOOL" + Boolean = "BOOLEAN" + + Serial = "SERIAL" + BigSerial = "BIGSERIAL" + + Json = "JSON" + Jsonb = "JSONB" + + Array = "ARRAY" + + SqlTypes = 
map[string]int{ + Bit: NUMERIC_TYPE, + TinyInt: NUMERIC_TYPE, + SmallInt: NUMERIC_TYPE, + MediumInt: NUMERIC_TYPE, + Int: NUMERIC_TYPE, + Integer: NUMERIC_TYPE, + BigInt: NUMERIC_TYPE, + + Enum: TEXT_TYPE, + Set: TEXT_TYPE, + Json: TEXT_TYPE, + Jsonb: TEXT_TYPE, + + Char: TEXT_TYPE, + NChar: TEXT_TYPE, + Varchar: TEXT_TYPE, + NVarchar: TEXT_TYPE, + TinyText: TEXT_TYPE, + Text: TEXT_TYPE, + NText: TEXT_TYPE, + MediumText: TEXT_TYPE, + LongText: TEXT_TYPE, + Uuid: TEXT_TYPE, + Clob: TEXT_TYPE, + SysName: TEXT_TYPE, + + Date: TIME_TYPE, + DateTime: TIME_TYPE, + Time: TIME_TYPE, + TimeStamp: TIME_TYPE, + TimeStampz: TIME_TYPE, + SmallDateTime: TIME_TYPE, + Year: TIME_TYPE, + + Decimal: NUMERIC_TYPE, + Numeric: NUMERIC_TYPE, + Real: NUMERIC_TYPE, + Float: NUMERIC_TYPE, + Double: NUMERIC_TYPE, + Money: NUMERIC_TYPE, + SmallMoney: NUMERIC_TYPE, + + Binary: BLOB_TYPE, + VarBinary: BLOB_TYPE, + + TinyBlob: BLOB_TYPE, + Blob: BLOB_TYPE, + MediumBlob: BLOB_TYPE, + LongBlob: BLOB_TYPE, + Bytea: BLOB_TYPE, + UniqueIdentifier: BLOB_TYPE, + + Bool: NUMERIC_TYPE, + + Serial: NUMERIC_TYPE, + BigSerial: NUMERIC_TYPE, + + Array: ARRAY_TYPE, + } + + intTypes = sort.StringSlice{"*int", "*int16", "*int32", "*int8"} + uintTypes = sort.StringSlice{"*uint", "*uint16", "*uint32", "*uint8"} +) + +// !nashtsai! treat following var as interal const values, these are used for reflect.TypeOf comparison +var ( + c_EMPTY_STRING string + c_BOOL_DEFAULT bool + c_BYTE_DEFAULT byte + c_COMPLEX64_DEFAULT complex64 + c_COMPLEX128_DEFAULT complex128 + c_FLOAT32_DEFAULT float32 + c_FLOAT64_DEFAULT float64 + c_INT64_DEFAULT int64 + c_UINT64_DEFAULT uint64 + c_INT32_DEFAULT int32 + c_UINT32_DEFAULT uint32 + c_INT16_DEFAULT int16 + c_UINT16_DEFAULT uint16 + c_INT8_DEFAULT int8 + c_UINT8_DEFAULT uint8 + c_INT_DEFAULT int + c_UINT_DEFAULT uint + c_TIME_DEFAULT time.Time +) + +var ( + IntType = reflect.TypeOf(c_INT_DEFAULT) + Int8Type = reflect.TypeOf(c_INT8_DEFAULT) + Int16Type = reflect.TypeOf(c_INT16_DEFAULT) + Int32Type = reflect.TypeOf(c_INT32_DEFAULT) + Int64Type = reflect.TypeOf(c_INT64_DEFAULT) + + UintType = reflect.TypeOf(c_UINT_DEFAULT) + Uint8Type = reflect.TypeOf(c_UINT8_DEFAULT) + Uint16Type = reflect.TypeOf(c_UINT16_DEFAULT) + Uint32Type = reflect.TypeOf(c_UINT32_DEFAULT) + Uint64Type = reflect.TypeOf(c_UINT64_DEFAULT) + + Float32Type = reflect.TypeOf(c_FLOAT32_DEFAULT) + Float64Type = reflect.TypeOf(c_FLOAT64_DEFAULT) + + Complex64Type = reflect.TypeOf(c_COMPLEX64_DEFAULT) + Complex128Type = reflect.TypeOf(c_COMPLEX128_DEFAULT) + + StringType = reflect.TypeOf(c_EMPTY_STRING) + BoolType = reflect.TypeOf(c_BOOL_DEFAULT) + ByteType = reflect.TypeOf(c_BYTE_DEFAULT) + BytesType = reflect.SliceOf(ByteType) + + TimeType = reflect.TypeOf(c_TIME_DEFAULT) +) + +var ( + PtrIntType = reflect.PtrTo(IntType) + PtrInt8Type = reflect.PtrTo(Int8Type) + PtrInt16Type = reflect.PtrTo(Int16Type) + PtrInt32Type = reflect.PtrTo(Int32Type) + PtrInt64Type = reflect.PtrTo(Int64Type) + + PtrUintType = reflect.PtrTo(UintType) + PtrUint8Type = reflect.PtrTo(Uint8Type) + PtrUint16Type = reflect.PtrTo(Uint16Type) + PtrUint32Type = reflect.PtrTo(Uint32Type) + PtrUint64Type = reflect.PtrTo(Uint64Type) + + PtrFloat32Type = reflect.PtrTo(Float32Type) + PtrFloat64Type = reflect.PtrTo(Float64Type) + + PtrComplex64Type = reflect.PtrTo(Complex64Type) + PtrComplex128Type = reflect.PtrTo(Complex128Type) + + PtrStringType = reflect.PtrTo(StringType) + PtrBoolType = reflect.PtrTo(BoolType) + PtrByteType = reflect.PtrTo(ByteType) + + PtrTimeType = 
reflect.PtrTo(TimeType)
+)
+
+// Type2SQLType generates a SQLType according to a Go type
+func Type2SQLType(t reflect.Type) (st SQLType) {
+	switch k := t.Kind(); k {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
+		st = SQLType{Int, 0, 0}
+	case reflect.Int64, reflect.Uint64:
+		st = SQLType{BigInt, 0, 0}
+	case reflect.Float32:
+		st = SQLType{Float, 0, 0}
+	case reflect.Float64:
+		st = SQLType{Double, 0, 0}
+	case reflect.Complex64, reflect.Complex128:
+		st = SQLType{Varchar, 64, 0}
+	case reflect.Array, reflect.Slice, reflect.Map:
+		if t.Elem() == reflect.TypeOf(c_BYTE_DEFAULT) {
+			st = SQLType{Blob, 0, 0}
+		} else {
+			st = SQLType{Text, 0, 0}
+		}
+	case reflect.Bool:
+		st = SQLType{Bool, 0, 0}
+	case reflect.String:
+		st = SQLType{Varchar, 255, 0}
+	case reflect.Struct:
+		if t.ConvertibleTo(TimeType) {
+			st = SQLType{DateTime, 0, 0}
+		} else {
+			// TODO need to handle association struct
+			st = SQLType{Text, 0, 0}
+		}
+	case reflect.Ptr:
+		st = Type2SQLType(t.Elem())
+	default:
+		st = SQLType{Text, 0, 0}
+	}
+	return
+}
+
+// SQLType2Type converts a default SQL type to a Go type
+func SQLType2Type(st SQLType) reflect.Type {
+	name := strings.ToUpper(st.Name)
+	switch name {
+	case Bit, TinyInt, SmallInt, MediumInt, Int, Integer, Serial:
+		return reflect.TypeOf(1)
+	case BigInt, BigSerial:
+		return reflect.TypeOf(int64(1))
+	case Float, Real:
+		return reflect.TypeOf(float32(1))
+	case Double:
+		return reflect.TypeOf(float64(1))
+	case Char, NChar, Varchar, NVarchar, TinyText, Text, NText, MediumText, LongText, Enum, Set, Uuid, Clob, SysName:
+		return reflect.TypeOf("")
+	case TinyBlob, Blob, LongBlob, Bytea, Binary, MediumBlob, VarBinary, UniqueIdentifier:
+		return reflect.TypeOf([]byte{})
+	case Bool:
+		return reflect.TypeOf(true)
+	case DateTime, Date, Time, TimeStamp, TimeStampz, SmallDateTime, Year:
+		return reflect.TypeOf(c_TIME_DEFAULT)
+	case Decimal, Numeric, Money, SmallMoney:
+		return reflect.TypeOf("")
+	default:
+		return reflect.TypeOf("")
+	}
+}
diff --git a/vendor/xorm.io/xorm/session.go b/vendor/xorm.io/xorm/session.go
new file mode 100644
index 0000000..b6ab3eb
--- /dev/null
+++ b/vendor/xorm.io/xorm/session.go
@@ -0,0 +1,912 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+	"context"
+	"crypto/rand"
+	"crypto/sha256"
+	"database/sql"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"hash/crc32"
+	"io"
+	"reflect"
+	"strings"
+	"time"
+
+	"xorm.io/xorm/contexts"
+	"xorm.io/xorm/convert"
+	"xorm.io/xorm/core"
+	"xorm.io/xorm/internal/json"
+	"xorm.io/xorm/internal/statements"
+	"xorm.io/xorm/log"
+	"xorm.io/xorm/schemas"
+)
+
+// ErrFieldIsNotExist reports that a column does not exist
+type ErrFieldIsNotExist struct {
+	FieldName string
+	TableName string
+}
+
+func (e ErrFieldIsNotExist) Error() string {
+	return fmt.Sprintf("field %s is not valid on table %s", e.FieldName, e.TableName)
+}
+
+// ErrFieldIsNotValid reports that a field is not valid
+type ErrFieldIsNotValid struct {
+	FieldName string
+	TableName string
+}
+
+func (e ErrFieldIsNotValid) Error() string {
+	return fmt.Sprintf("field %s is not valid on table %s", e.FieldName, e.TableName)
+}
+
+type sessionType bool
+
+const (
+	engineSession sessionType = false
+	groupSession  sessionType = true
+)
+
+// Session keeps a pointer to sql.DB and provides execution of all
+// kinds of database operations.
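+//
+// A typical lifecycle, for illustration (Begin/Commit are defined elsewhere in
+// this package, not in this file):
+//
+//	sess := engine.NewSession()
+//	defer sess.Close()
+//	if err := sess.Begin(); err != nil {
+//		return err
+//	}
+//	// ... inserts / updates on sess ...
+//	if err := sess.Commit(); err != nil {
+//		return err
+//	}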
+type Session struct { + engine *Engine + tx *core.Tx + statement *statements.Statement + isAutoCommit bool + isCommitedOrRollbacked bool + isAutoClose bool + isClosed bool + prepareStmt bool + // Automatically reset the statement after operations that execute a SQL + // query such as Count(), Find(), Get(), ... + autoResetStatement bool + + // !nashtsai! storing these beans due to yet committed tx + afterInsertBeans map[interface{}]*[]func(interface{}) + afterUpdateBeans map[interface{}]*[]func(interface{}) + afterDeleteBeans map[interface{}]*[]func(interface{}) + // -- + + beforeClosures []func(interface{}) + afterClosures []func(interface{}) + afterProcessors []executedProcessor + + stmtCache map[uint32]*core.Stmt //key: hash.Hash32 of (queryStr, len(queryStr)) + + lastSQL string + lastSQLArgs []interface{} + + ctx context.Context + sessionType sessionType +} + +func newSessionID() string { + hash := sha256.New() + _, err := io.CopyN(hash, rand.Reader, 50) + if err != nil { + return "????????????????????" + } + md := hash.Sum(nil) + mdStr := hex.EncodeToString(md) + return mdStr[0:20] +} + +func newSession(engine *Engine) *Session { + var ctx context.Context + if engine.logSessionID { + ctx = context.WithValue(engine.defaultContext, log.SessionIDKey, newSessionID()) + } else { + ctx = engine.defaultContext + } + + session := &Session{ + ctx: ctx, + engine: engine, + tx: nil, + statement: statements.NewStatement( + engine.dialect, + engine.tagParser, + engine.DatabaseTZ, + ), + isClosed: false, + isAutoCommit: true, + isCommitedOrRollbacked: false, + isAutoClose: false, + autoResetStatement: true, + prepareStmt: false, + + afterInsertBeans: make(map[interface{}]*[]func(interface{}), 0), + afterUpdateBeans: make(map[interface{}]*[]func(interface{}), 0), + afterDeleteBeans: make(map[interface{}]*[]func(interface{}), 0), + beforeClosures: make([]func(interface{}), 0), + afterClosures: make([]func(interface{}), 0), + afterProcessors: make([]executedProcessor, 0), + stmtCache: make(map[uint32]*core.Stmt), + + lastSQL: "", + lastSQLArgs: make([]interface{}, 0), + + sessionType: engineSession, + } + if engine.logSessionID { + session.ctx = context.WithValue(session.ctx, log.SessionKey, session) + } + return session +} + +// Close release the connection from pool +func (session *Session) Close() error { + for _, v := range session.stmtCache { + if err := v.Close(); err != nil { + return err + } + } + + if !session.isClosed { + // When Close be called, if session is a transaction and do not call + // Commit or Rollback, then call Rollback. 
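+		// Rolling back here keeps an abandoned transaction from holding its
+		// connection after the session is closed.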
+		if session.tx != nil && !session.isCommitedOrRollbacked {
+			if err := session.Rollback(); err != nil {
+				return err
+			}
+		}
+		session.tx = nil
+		session.stmtCache = nil
+		session.isClosed = true
+	}
+	return nil
+}
+
+func (session *Session) db() *core.DB {
+	return session.engine.db
+}
+
+// Engine returns the session's engine
+func (session *Session) Engine() *Engine {
+	return session.engine
+}
+
+func (session *Session) getQueryer() core.Queryer {
+	if session.tx != nil {
+		return session.tx
+	}
+	return session.db()
+}
+
+// ContextCache enables the context cache or not
+func (session *Session) ContextCache(context contexts.ContextCache) *Session {
+	session.statement.SetContextCache(context)
+	return session
+}
+
+// IsClosed returns if session is closed
+func (session *Session) IsClosed() bool {
+	return session.isClosed
+}
+
+func (session *Session) resetStatement() {
+	if session.autoResetStatement {
+		session.statement.Reset()
+	}
+}
+
+// Prepare sets a flag on the session so that statements are prepared before queries execute
+func (session *Session) Prepare() *Session {
+	session.prepareStmt = true
+	return session
+}
+
+// Before applies a before-processor; the affected bean is passed to the closure arg
+func (session *Session) Before(closures func(interface{})) *Session {
+	if closures != nil {
+		session.beforeClosures = append(session.beforeClosures, closures)
+	}
+	return session
+}
+
+// After applies an after-processor; the affected bean is passed to the closure arg
+func (session *Session) After(closures func(interface{})) *Session {
+	if closures != nil {
+		session.afterClosures = append(session.afterClosures, closures)
+	}
+	return session
+}
+
+// Table accepts a string or a pointer to a struct to specify the table to operate on.
+func (session *Session) Table(tableNameOrBean interface{}) *Session {
+	if err := session.statement.SetTable(tableNameOrBean); err != nil {
+		session.statement.LastError = err
+	}
+	return session
+}
+
+// Alias sets the table alias
+func (session *Session) Alias(alias string) *Session {
+	session.statement.Alias(alias)
+	return session
+}
+
+// NoCascade indicates that child objects should not be cascade-loaded
+func (session *Session) NoCascade() *Session {
+	session.statement.UseCascade = false
+	return session
+}
+
+// ForUpdate sets read/write locking for UPDATE
+func (session *Session) ForUpdate() *Session {
+	session.statement.IsForUpdate = true
+	return session
+}
+
+// NoAutoCondition disables generating SQL conditions from beans
+func (session *Session) NoAutoCondition(no ...bool) *Session {
+	session.statement.SetNoAutoCondition(no...)
+	return session
+}
+
+// Limit provides the limit and offset query condition
+func (session *Session) Limit(limit int, start ...int) *Session {
+	session.statement.Limit(limit, start...)
+	return session
+}
+
+// OrderBy provides the order-by query condition; the input parameter is the content
+// after ORDER BY in a SQL statement.
+func (session *Session) OrderBy(order string) *Session {
+	session.statement.OrderBy(order)
+	return session
+}
+
+// Desc provides the descending order-by query condition; the input parameters are columns.
+func (session *Session) Desc(colNames ...string) *Session {
+	session.statement.Desc(colNames...)
+	return session
+}
+
+// Asc provides the ascending order-by query condition; the input parameters are columns.
+func (session *Session) Asc(colNames ...string) *Session {
+	session.statement.Asc(colNames...)
+ return session +} + +// StoreEngine is only avialble mysql dialect currently +func (session *Session) StoreEngine(storeEngine string) *Session { + session.statement.StoreEngine = storeEngine + return session +} + +// Charset is only avialble mysql dialect currently +func (session *Session) Charset(charset string) *Session { + session.statement.Charset = charset + return session +} + +// Cascade indicates if loading sub Struct +func (session *Session) Cascade(trueOrFalse ...bool) *Session { + if len(trueOrFalse) >= 1 { + session.statement.UseCascade = trueOrFalse[0] + } + return session +} + +// MustLogSQL means record SQL or not and don't follow engine's setting +func (session *Session) MustLogSQL(logs ...bool) *Session { + var showSQL = true + if len(logs) > 0 { + showSQL = logs[0] + } + session.ctx = context.WithValue(session.ctx, log.SessionShowSQLKey, showSQL) + return session +} + +// NoCache ask this session do not retrieve data from cache system and +// get data from database directly. +func (session *Session) NoCache() *Session { + session.statement.UseCache = false + return session +} + +// Join join_operator should be one of INNER, LEFT OUTER, CROSS etc - this will be prepended to JOIN +func (session *Session) Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session { + session.statement.Join(joinOperator, tablename, condition, args...) + return session +} + +// GroupBy Generate Group By statement +func (session *Session) GroupBy(keys string) *Session { + session.statement.GroupBy(keys) + return session +} + +// Having Generate Having statement +func (session *Session) Having(conditions string) *Session { + session.statement.Having(conditions) + return session +} + +// DB db return the wrapper of sql.DB +func (session *Session) DB() *core.DB { + return session.db() +} + +func (session *Session) canCache() bool { + if session.statement.RefTable == nil || + session.statement.JoinStr != "" || + session.statement.RawSQL != "" || + !session.statement.UseCache || + session.statement.IsForUpdate || + session.tx != nil || + len(session.statement.SelectStr) > 0 { + return false + } + return true +} + +func (session *Session) doPrepare(db *core.DB, sqlStr string) (stmt *core.Stmt, err error) { + crc := crc32.ChecksumIEEE([]byte(sqlStr)) + // TODO try hash(sqlStr+len(sqlStr)) + var has bool + stmt, has = session.stmtCache[crc] + if !has { + stmt, err = db.PrepareContext(session.ctx, sqlStr) + if err != nil { + return nil, err + } + session.stmtCache[crc] = stmt + } + return +} + +func (session *Session) getField(dataStruct *reflect.Value, key string, table *schemas.Table, idx int) (*reflect.Value, error) { + var col *schemas.Column + if col = table.GetColumnIdx(key, idx); col == nil { + return nil, ErrFieldIsNotExist{key, table.Name} + } + + fieldValue, err := col.ValueOfV(dataStruct) + if err != nil { + return nil, err + } + + if !fieldValue.IsValid() || !fieldValue.CanSet() { + return nil, ErrFieldIsNotValid{key, table.Name} + } + + return fieldValue, nil +} + +// Cell cell is a result of one column field +type Cell *interface{} + +func (session *Session) rows2Beans(rows *core.Rows, fields []string, + table *schemas.Table, newElemFunc func([]string) reflect.Value, + sliceValueSetFunc func(*reflect.Value, schemas.PK) error) error { + for rows.Next() { + var newValue = newElemFunc(fields) + bean := newValue.Interface() + dataStruct := newValue.Elem() + + // handle beforeClosures + scanResults, err := session.row2Slice(rows, fields, bean) + if err != 
nil { + return err + } + pk, err := session.slice2Bean(scanResults, fields, bean, &dataStruct, table) + if err != nil { + return err + } + session.afterProcessors = append(session.afterProcessors, executedProcessor{ + fun: func(*Session, interface{}) error { + return sliceValueSetFunc(&newValue, pk) + }, + session: session, + bean: bean, + }) + } + return nil +} + +func (session *Session) row2Slice(rows *core.Rows, fields []string, bean interface{}) ([]interface{}, error) { + for _, closure := range session.beforeClosures { + closure(bean) + } + + scanResults := make([]interface{}, len(fields)) + for i := 0; i < len(fields); i++ { + var cell interface{} + scanResults[i] = &cell + } + if err := rows.Scan(scanResults...); err != nil { + return nil, err + } + + executeBeforeSet(bean, fields, scanResults) + + return scanResults, nil +} + +func (session *Session) slice2Bean(scanResults []interface{}, fields []string, bean interface{}, dataStruct *reflect.Value, table *schemas.Table) (schemas.PK, error) { + defer func() { + executeAfterSet(bean, fields, scanResults) + }() + + buildAfterProcessors(session, bean) + + var tempMap = make(map[string]int) + var pk schemas.PK + for ii, key := range fields { + var idx int + var ok bool + var lKey = strings.ToLower(key) + if idx, ok = tempMap[lKey]; !ok { + idx = 0 + } else { + idx = idx + 1 + } + tempMap[lKey] = idx + + fieldValue, err := session.getField(dataStruct, key, table, idx) + if err != nil { + if !strings.Contains(err.Error(), "is not valid") { + session.engine.logger.Warnf("%v", err) + } + continue + } + if fieldValue == nil { + continue + } + rawValue := reflect.Indirect(reflect.ValueOf(scanResults[ii])) + + // if row is null then ignore + if rawValue.Interface() == nil { + continue + } + + if fieldValue.CanAddr() { + if structConvert, ok := fieldValue.Addr().Interface().(convert.Conversion); ok { + if data, err := value2Bytes(&rawValue); err == nil { + if err := structConvert.FromDB(data); err != nil { + return nil, err + } + } else { + return nil, err + } + continue + } + } + + if _, ok := fieldValue.Interface().(convert.Conversion); ok { + if data, err := value2Bytes(&rawValue); err == nil { + if fieldValue.Kind() == reflect.Ptr && fieldValue.IsNil() { + fieldValue.Set(reflect.New(fieldValue.Type().Elem())) + } + fieldValue.Interface().(convert.Conversion).FromDB(data) + } else { + return nil, err + } + continue + } + + rawValueType := reflect.TypeOf(rawValue.Interface()) + vv := reflect.ValueOf(rawValue.Interface()) + col := table.GetColumnIdx(key, idx) + if col.IsPrimaryKey { + pk = append(pk, rawValue.Interface()) + } + fieldType := fieldValue.Type() + hasAssigned := false + + if col.SQLType.IsJson() { + var bs []byte + if rawValueType.Kind() == reflect.String { + bs = []byte(vv.String()) + } else if rawValueType.ConvertibleTo(schemas.BytesType) { + bs = vv.Bytes() + } else { + return nil, fmt.Errorf("unsupported database data type: %s %v", key, rawValueType.Kind()) + } + + hasAssigned = true + + if len(bs) > 0 { + if fieldType.Kind() == reflect.String { + fieldValue.SetString(string(bs)) + continue + } + if fieldValue.CanAddr() { + err := json.DefaultJSONHandler.Unmarshal(bs, fieldValue.Addr().Interface()) + if err != nil { + return nil, err + } + } else { + x := reflect.New(fieldType) + err := json.DefaultJSONHandler.Unmarshal(bs, x.Interface()) + if err != nil { + return nil, err + } + fieldValue.Set(x.Elem()) + } + } + + continue + } + + switch fieldType.Kind() { + case reflect.Complex64, reflect.Complex128: + // TODO: reimplement 
this + var bs []byte + if rawValueType.Kind() == reflect.String { + bs = []byte(vv.String()) + } else if rawValueType.ConvertibleTo(schemas.BytesType) { + bs = vv.Bytes() + } + + hasAssigned = true + if len(bs) > 0 { + if fieldValue.CanAddr() { + err := json.DefaultJSONHandler.Unmarshal(bs, fieldValue.Addr().Interface()) + if err != nil { + return nil, err + } + } else { + x := reflect.New(fieldType) + err := json.DefaultJSONHandler.Unmarshal(bs, x.Interface()) + if err != nil { + return nil, err + } + fieldValue.Set(x.Elem()) + } + } + case reflect.Slice, reflect.Array: + switch rawValueType.Kind() { + case reflect.Slice, reflect.Array: + switch rawValueType.Elem().Kind() { + case reflect.Uint8: + if fieldType.Elem().Kind() == reflect.Uint8 { + hasAssigned = true + if col.SQLType.IsText() { + x := reflect.New(fieldType) + err := json.DefaultJSONHandler.Unmarshal(vv.Bytes(), x.Interface()) + if err != nil { + return nil, err + } + fieldValue.Set(x.Elem()) + } else { + if fieldValue.Len() > 0 { + for i := 0; i < fieldValue.Len(); i++ { + if i < vv.Len() { + fieldValue.Index(i).Set(vv.Index(i)) + } + } + } else { + for i := 0; i < vv.Len(); i++ { + fieldValue.Set(reflect.Append(*fieldValue, vv.Index(i))) + } + } + } + } + } + } + case reflect.String: + if rawValueType.Kind() == reflect.String { + hasAssigned = true + fieldValue.SetString(vv.String()) + } + case reflect.Bool: + if rawValueType.Kind() == reflect.Bool { + hasAssigned = true + fieldValue.SetBool(vv.Bool()) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch rawValueType.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + hasAssigned = true + fieldValue.SetInt(vv.Int()) + } + case reflect.Float32, reflect.Float64: + switch rawValueType.Kind() { + case reflect.Float32, reflect.Float64: + hasAssigned = true + fieldValue.SetFloat(vv.Float()) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + switch rawValueType.Kind() { + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + hasAssigned = true + fieldValue.SetUint(vv.Uint()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + hasAssigned = true + fieldValue.SetUint(uint64(vv.Int())) + } + case reflect.Struct: + if fieldType.ConvertibleTo(schemas.TimeType) { + dbTZ := session.engine.DatabaseTZ + if col.TimeZone != nil { + dbTZ = col.TimeZone + } + + if rawValueType == schemas.TimeType { + hasAssigned = true + + t := vv.Convert(schemas.TimeType).Interface().(time.Time) + + z, _ := t.Zone() + // set new location if database don't save timezone or give an incorrect timezone + if len(z) == 0 || t.Year() == 0 || t.Location().String() != dbTZ.String() { // !nashtsai! 
HACK tmp work around for lib/pq doesn't properly time with location + session.engine.logger.Debugf("empty zone key[%v] : %v | zone: %v | location: %+v\n", key, t, z, *t.Location()) + t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), + t.Minute(), t.Second(), t.Nanosecond(), dbTZ) + } + + t = t.In(session.engine.TZLocation) + fieldValue.Set(reflect.ValueOf(t).Convert(fieldType)) + } else if rawValueType == schemas.IntType || rawValueType == schemas.Int64Type || + rawValueType == schemas.Int32Type { + hasAssigned = true + + t := time.Unix(vv.Int(), 0).In(session.engine.TZLocation) + fieldValue.Set(reflect.ValueOf(t).Convert(fieldType)) + } else { + if d, ok := vv.Interface().([]uint8); ok { + hasAssigned = true + t, err := session.byte2Time(col, d) + if err != nil { + session.engine.logger.Errorf("byte2Time error: %v", err) + hasAssigned = false + } else { + fieldValue.Set(reflect.ValueOf(t).Convert(fieldType)) + } + } else if d, ok := vv.Interface().(string); ok { + hasAssigned = true + t, err := session.str2Time(col, d) + if err != nil { + session.engine.logger.Errorf("byte2Time error: %v", err) + hasAssigned = false + } else { + fieldValue.Set(reflect.ValueOf(t).Convert(fieldType)) + } + } else { + return nil, fmt.Errorf("rawValueType is %v, value is %v", rawValueType, vv.Interface()) + } + } + } else if nulVal, ok := fieldValue.Addr().Interface().(sql.Scanner); ok { + // !! 增加支持sql.Scanner接口的结构,如sql.NullString + hasAssigned = true + if err := nulVal.Scan(vv.Interface()); err != nil { + session.engine.logger.Errorf("sql.Sanner error: %v", err) + hasAssigned = false + } + } else if col.SQLType.IsJson() { + if rawValueType.Kind() == reflect.String { + hasAssigned = true + x := reflect.New(fieldType) + if len([]byte(vv.String())) > 0 { + err := json.DefaultJSONHandler.Unmarshal([]byte(vv.String()), x.Interface()) + if err != nil { + return nil, err + } + fieldValue.Set(x.Elem()) + } + } else if rawValueType.Kind() == reflect.Slice { + hasAssigned = true + x := reflect.New(fieldType) + if len(vv.Bytes()) > 0 { + err := json.DefaultJSONHandler.Unmarshal(vv.Bytes(), x.Interface()) + if err != nil { + return nil, err + } + fieldValue.Set(x.Elem()) + } + } + } else if session.statement.UseCascade { + table, err := session.engine.tagParser.ParseWithCache(*fieldValue) + if err != nil { + return nil, err + } + + hasAssigned = true + if len(table.PrimaryKeys) != 1 { + return nil, errors.New("unsupported non or composited primary key cascade") + } + var pk = make(schemas.PK, len(table.PrimaryKeys)) + pk[0], err = asKind(vv, rawValueType) + if err != nil { + return nil, err + } + + if !pk.IsZero() { + // !nashtsai! TODO for hasOne relationship, it's preferred to use join query for eager fetch + // however, also need to consider adding a 'lazy' attribute to xorm tag which allow hasOne + // property to be fetched lazily + structInter := reflect.New(fieldValue.Type()) + has, err := session.ID(pk).NoCascade().get(structInter.Interface()) + if err != nil { + return nil, err + } + if has { + fieldValue.Set(structInter.Elem()) + } else { + return nil, errors.New("cascade obj is not exist") + } + } + } + case reflect.Ptr: + // !nashtsai! 
TODO merge duplicated codes above + switch fieldType { + // following types case matching ptr's native type, therefore assign ptr directly + case schemas.PtrStringType: + if rawValueType.Kind() == reflect.String { + x := vv.String() + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrBoolType: + if rawValueType.Kind() == reflect.Bool { + x := vv.Bool() + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrTimeType: + if rawValueType == schemas.PtrTimeType { + hasAssigned = true + var x = rawValue.Interface().(time.Time) + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrFloat64Type: + if rawValueType.Kind() == reflect.Float64 { + x := vv.Float() + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrUint64Type: + if rawValueType.Kind() == reflect.Int64 { + var x = uint64(vv.Int()) + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrInt64Type: + if rawValueType.Kind() == reflect.Int64 { + x := vv.Int() + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrFloat32Type: + if rawValueType.Kind() == reflect.Float64 { + var x = float32(vv.Float()) + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrIntType: + if rawValueType.Kind() == reflect.Int64 { + var x = int(vv.Int()) + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrInt32Type: + if rawValueType.Kind() == reflect.Int64 { + var x = int32(vv.Int()) + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrInt8Type: + if rawValueType.Kind() == reflect.Int64 { + var x = int8(vv.Int()) + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrInt16Type: + if rawValueType.Kind() == reflect.Int64 { + var x = int16(vv.Int()) + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrUintType: + if rawValueType.Kind() == reflect.Int64 { + var x = uint(vv.Int()) + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrUint32Type: + if rawValueType.Kind() == reflect.Int64 { + var x = uint32(vv.Int()) + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.Uint8Type: + if rawValueType.Kind() == reflect.Int64 { + var x = uint8(vv.Int()) + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.Uint16Type: + if rawValueType.Kind() == reflect.Int64 { + var x = uint16(vv.Int()) + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.Complex64Type: + var x complex64 + if len([]byte(vv.String())) > 0 { + err := json.DefaultJSONHandler.Unmarshal([]byte(vv.String()), &x) + if err != nil { + return nil, err + } + fieldValue.Set(reflect.ValueOf(&x)) + } + hasAssigned = true + case schemas.Complex128Type: + var x complex128 + if len([]byte(vv.String())) > 0 { + err := json.DefaultJSONHandler.Unmarshal([]byte(vv.String()), &x) + if err != nil { + return nil, err + } + fieldValue.Set(reflect.ValueOf(&x)) + } + hasAssigned = true + } // switch fieldType + } // switch fieldType.Kind() + + // !nashtsai! 
for value can't be assigned directly fallback to convert to []byte then back to value + if !hasAssigned { + data, err := value2Bytes(&rawValue) + if err != nil { + return nil, err + } + + if err = session.bytes2Value(col, fieldValue, data); err != nil { + return nil, err + } + } + } + return pk, nil +} + +// saveLastSQL stores executed query information +func (session *Session) saveLastSQL(sql string, args ...interface{}) { + session.lastSQL = sql + session.lastSQLArgs = args +} + +// LastSQL returns last query information +func (session *Session) LastSQL() (string, []interface{}) { + return session.lastSQL, session.lastSQLArgs +} + +// Unscoped always disable struct tag "deleted" +func (session *Session) Unscoped() *Session { + session.statement.SetUnscoped() + return session +} + +func (session *Session) incrVersionFieldValue(fieldValue *reflect.Value) { + switch fieldValue.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + fieldValue.SetInt(fieldValue.Int() + 1) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + fieldValue.SetUint(fieldValue.Uint() + 1) + } +} + +// ContextHook sets the context on this session +func (session *Session) Context(ctx context.Context) *Session { + session.ctx = ctx + return session +} + +// PingContext test if database is ok +func (session *Session) PingContext(ctx context.Context) error { + if session.isAutoClose { + defer session.Close() + } + + session.engine.logger.Infof("PING DATABASE %v", session.engine.DriverName()) + return session.DB().PingContext(ctx) +} diff --git a/vendor/xorm.io/xorm/session_cols.go b/vendor/xorm.io/xorm/session_cols.go new file mode 100644 index 0000000..ca3589a --- /dev/null +++ b/vendor/xorm.io/xorm/session_cols.go @@ -0,0 +1,143 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "reflect" + "strings" + "time" + + "xorm.io/xorm/schemas" +) + +func setColumnInt(bean interface{}, col *schemas.Column, t int64) { + v, err := col.ValueOf(bean) + if err != nil { + return + } + if v.CanSet() { + switch v.Type().Kind() { + case reflect.Int, reflect.Int64, reflect.Int32: + v.SetInt(t) + case reflect.Uint, reflect.Uint64, reflect.Uint32: + v.SetUint(uint64(t)) + } + } +} + +func setColumnTime(bean interface{}, col *schemas.Column, t time.Time) { + v, err := col.ValueOf(bean) + if err != nil { + return + } + if v.CanSet() { + switch v.Type().Kind() { + case reflect.Struct: + v.Set(reflect.ValueOf(t).Convert(v.Type())) + case reflect.Int, reflect.Int64, reflect.Int32: + v.SetInt(t.Unix()) + case reflect.Uint, reflect.Uint64, reflect.Uint32: + v.SetUint(uint64(t.Unix())) + } + } +} + +func getFlagForColumn(m map[string]bool, col *schemas.Column) (val bool, has bool) { + if len(m) == 0 { + return false, false + } + + n := len(col.Name) + + for mk := range m { + if len(mk) != n { + continue + } + if strings.EqualFold(mk, col.Name) { + return m[mk], true + } + } + + return false, false +} + +// Incr provides a query string like "count = count + 1" +func (session *Session) Incr(column string, arg ...interface{}) *Session { + session.statement.Incr(column, arg...) + return session +} + +// Decr provides a query string like "count = count - 1" +func (session *Session) Decr(column string, arg ...interface{}) *Session { + session.statement.Decr(column, arg...) 
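+ // A minimal, hedged usage sketch of Incr/Decr (the User type and the + // column names are illustrative assumptions, not part of this package): + // + //     affected, err := engine.ID(1).Incr("login_count").Decr("credits", 5).Update(new(User)) + // + // which renders SET clauses such as "login_count = login_count + 1, credits = credits - 5".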
+ return session +} + +// SetExpr provides a query string like "column = {expression}" +func (session *Session) SetExpr(column string, expression interface{}) *Session { + session.statement.SetExpr(column, expression) + return session +} + +// Select specifies a custom SELECT clause for the query +func (session *Session) Select(str string) *Session { + session.statement.Select(str) + return session +} + +// Cols specifies the columns to operate on +func (session *Session) Cols(columns ...string) *Session { + session.statement.Cols(columns...) + return session +} + +// AllCols indicates that all columns should be used +func (session *Session) AllCols() *Session { + session.statement.AllCols() + return session +} + +// MustCols specifies columns that must be used even if they hold zero values +func (session *Session) MustCols(columns ...string) *Session { + session.statement.MustCols(columns...) + return session +} + +// UseBool includes bool fields when conditions are derived automatically from +// a struct. By default bool fields are ignored; UseBool tells the system to +// honor them. With no parameters, all bool fields of the struct are used; +// otherwise only the named columns are. +func (session *Session) UseBool(columns ...string) *Session { + session.statement.UseBool(columns...) + return session +} + +// Distinct applies DISTINCT to the given columns. Caution: distinct queries +// are never cached, because the cache system needs an id and DISTINCT does +// not provide one. +func (session *Session) Distinct(columns ...string) *Session { + session.statement.Distinct(columns...) + return session +} + +// Omit excludes the given columns from select or update operations +func (session *Session) Omit(columns ...string) *Session { + session.statement.Omit(columns...) + return session +} + +// Nullable sets columns to NULL on update when they are zero-valued and nullable +func (session *Session) Nullable(columns ...string) *Session { + session.statement.Nullable(columns...) + return session +} + +// NoAutoTime disables automatically setting the created and updated fields to +// the current time, for the current session only +func (session *Session) NoAutoTime() *Session { + session.statement.UseAutoTime = false + return session +} diff --git a/vendor/xorm.io/xorm/session_cond.go b/vendor/xorm.io/xorm/session_cond.go new file mode 100644 index 0000000..25d1714 --- /dev/null +++ b/vendor/xorm.io/xorm/session_cond.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import "xorm.io/builder" + +// SQL provides a raw SQL input parameter. When a statement is too complex to +// describe with the Where, ID, In and similar methods, use SQL. +func (session *Session) SQL(query interface{}, args ...interface{}) *Session { + session.statement.SQL(query, args...) + return session +} + +// Where provides a custom query condition. +func (session *Session) Where(query interface{}, args ...interface{}) *Session { + session.statement.Where(query, args...) + return session +} + +// And provides a custom query condition. +func (session *Session) And(query interface{}, args ...interface{}) *Session { + session.statement.And(query, args...) + return session +} + +// Or provides a custom query condition. +func (session *Session) Or(query interface{}, args ...interface{}) *Session { + session.statement.Or(query, args...)
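+ // Where/And/Or accumulate into a single condition tree. A hedged sketch + // (the User type and its columns are assumptions for illustration): + // + //     var users []User + //     err := engine.Where("age > ?", 18).And("status = ?", "active").Or("is_admin = ?", true).Find(&users)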
+ return session +} + +// ID provides converting id as a query condition +func (session *Session) ID(id interface{}) *Session { + session.statement.ID(id) + return session +} + +// In provides a query string like "id in (1, 2, 3)" +func (session *Session) In(column string, args ...interface{}) *Session { + session.statement.In(column, args...) + return session +} + +// NotIn provides a query string like "id in (1, 2, 3)" +func (session *Session) NotIn(column string, args ...interface{}) *Session { + session.statement.NotIn(column, args...) + return session +} + +// Conds returns session query conditions except auto bean conditions +func (session *Session) Conds() builder.Cond { + return session.statement.Conds() +} diff --git a/vendor/xorm.io/xorm/session_convert.go b/vendor/xorm.io/xorm/session_convert.go new file mode 100644 index 0000000..a683994 --- /dev/null +++ b/vendor/xorm.io/xorm/session_convert.go @@ -0,0 +1,529 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "database/sql" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + + "xorm.io/xorm/convert" + "xorm.io/xorm/internal/json" + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" +) + +func (session *Session) str2Time(col *schemas.Column, data string) (outTime time.Time, outErr error) { + sdata := strings.TrimSpace(data) + var x time.Time + var err error + + var parseLoc = session.engine.DatabaseTZ + if col.TimeZone != nil { + parseLoc = col.TimeZone + } + + if sdata == utils.ZeroTime0 || sdata == utils.ZeroTime1 { + } else if !strings.ContainsAny(sdata, "- :") { // !nashtsai! has only found that mymysql driver is using this for time type column + // time stamp + sd, err := strconv.ParseInt(sdata, 10, 64) + if err == nil { + x = time.Unix(sd, 0) + //session.engine.logger.Debugf("time(0) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata) + } else { + //session.engine.logger.Debugf("time(0) err key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata) + } + } else if len(sdata) > 19 && strings.Contains(sdata, "-") { + x, err = time.ParseInLocation(time.RFC3339Nano, sdata, parseLoc) + session.engine.logger.Debugf("time(1) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata) + if err != nil { + x, err = time.ParseInLocation("2006-01-02 15:04:05.999999999", sdata, parseLoc) + //session.engine.logger.Debugf("time(2) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata) + } + if err != nil { + x, err = time.ParseInLocation("2006-01-02 15:04:05.9999999 Z07:00", sdata, parseLoc) + //session.engine.logger.Debugf("time(3) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata) + } + } else if len(sdata) == 19 && strings.Contains(sdata, "-") { + x, err = time.ParseInLocation("2006-01-02 15:04:05", sdata, parseLoc) + //session.engine.logger.Debugf("time(4) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata) + } else if len(sdata) == 10 && sdata[4] == '-' && sdata[7] == '-' { + x, err = time.ParseInLocation("2006-01-02", sdata, parseLoc) + //session.engine.logger.Debugf("time(5) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata) + } else if col.SQLType.Name == schemas.Time { + if strings.Contains(sdata, " ") { + ssd := strings.Split(sdata, " ") + sdata = ssd[1] + } + + sdata = strings.TrimSpace(sdata) + if session.engine.dialect.URI().DBType == schemas.MYSQL && len(sdata) > 8 { + sdata = sdata[len(sdata)-8:] + } + + st := fmt.Sprintf("2006-01-02 %v", sdata) + 
x, err = time.ParseInLocation("2006-01-02 15:04:05", st, parseLoc) + //session.engine.logger.Debugf("time(6) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata) + } else { + outErr = fmt.Errorf("unsupported time format %v", sdata) + return + } + if err != nil { + outErr = fmt.Errorf("unsupported time format %v: %v", sdata, err) + return + } + outTime = x.In(session.engine.TZLocation) + return +} + +func (session *Session) byte2Time(col *schemas.Column, data []byte) (outTime time.Time, outErr error) { + return session.str2Time(col, string(data)) +} + +// convert a db data([]byte) to a field value +func (session *Session) bytes2Value(col *schemas.Column, fieldValue *reflect.Value, data []byte) error { + if structConvert, ok := fieldValue.Addr().Interface().(convert.Conversion); ok { + return structConvert.FromDB(data) + } + + if structConvert, ok := fieldValue.Interface().(convert.Conversion); ok { + return structConvert.FromDB(data) + } + + var v interface{} + key := col.Name + fieldType := fieldValue.Type() + + switch fieldType.Kind() { + case reflect.Complex64, reflect.Complex128: + x := reflect.New(fieldType) + if len(data) > 0 { + err := json.DefaultJSONHandler.Unmarshal(data, x.Interface()) + if err != nil { + return err + } + fieldValue.Set(x.Elem()) + } + case reflect.Slice, reflect.Array, reflect.Map: + v = data + t := fieldType.Elem() + k := t.Kind() + if col.SQLType.IsText() { + x := reflect.New(fieldType) + if len(data) > 0 { + err := json.DefaultJSONHandler.Unmarshal(data, x.Interface()) + if err != nil { + return err + } + fieldValue.Set(x.Elem()) + } + } else if col.SQLType.IsBlob() { + if k == reflect.Uint8 { + fieldValue.Set(reflect.ValueOf(v)) + } else { + x := reflect.New(fieldType) + if len(data) > 0 { + err := json.DefaultJSONHandler.Unmarshal(data, x.Interface()) + if err != nil { + return err + } + fieldValue.Set(x.Elem()) + } + } + } else { + return ErrUnSupportedType + } + case reflect.String: + fieldValue.SetString(string(data)) + case reflect.Bool: + v, err := asBool(data) + if err != nil { + return fmt.Errorf("arg %v as bool: %s", key, err.Error()) + } + fieldValue.Set(reflect.ValueOf(v)) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + sdata := string(data) + var x int64 + var err error + // for mysql, when use bit, it returned \x01 + if col.SQLType.Name == schemas.Bit && + session.engine.dialect.URI().DBType == schemas.MYSQL { // !nashtsai! TODO dialect needs to provide conversion interface API + if len(data) == 1 { + x = int64(data[0]) + } else { + x = 0 + } + } else if strings.HasPrefix(sdata, "0x") { + x, err = strconv.ParseInt(sdata, 16, 64) + } else if strings.HasPrefix(sdata, "0") { + x, err = strconv.ParseInt(sdata, 8, 64) + } else if strings.EqualFold(sdata, "true") { + x = 1 + } else if strings.EqualFold(sdata, "false") { + x = 0 + } else { + x, err = strconv.ParseInt(sdata, 10, 64) + } + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + fieldValue.SetInt(x) + case reflect.Float32, reflect.Float64: + x, err := strconv.ParseFloat(string(data), 64) + if err != nil { + return fmt.Errorf("arg %v as float64: %s", key, err.Error()) + } + fieldValue.SetFloat(x) + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + x, err := strconv.ParseUint(string(data), 10, 64) + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + fieldValue.SetUint(x) + //Currently only support Time type + case reflect.Struct: + // !! 
增加支持sql.Scanner接口的结构,如sql.NullString + if nulVal, ok := fieldValue.Addr().Interface().(sql.Scanner); ok { + if err := nulVal.Scan(data); err != nil { + return fmt.Errorf("sql.Scan(%v) failed: %s ", data, err.Error()) + } + } else { + if fieldType.ConvertibleTo(schemas.TimeType) { + x, err := session.byte2Time(col, data) + if err != nil { + return err + } + v = x + fieldValue.Set(reflect.ValueOf(v).Convert(fieldType)) + } else if session.statement.UseCascade { + table, err := session.engine.tagParser.ParseWithCache(*fieldValue) + if err != nil { + return err + } + + // TODO: current only support 1 primary key + if len(table.PrimaryKeys) > 1 { + return errors.New("unsupported composited primary key cascade") + } + + var pk = make(schemas.PK, len(table.PrimaryKeys)) + rawValueType := table.ColumnType(table.PKColumns()[0].FieldName) + pk[0], err = str2PK(string(data), rawValueType) + if err != nil { + return err + } + + if !pk.IsZero() { + // !nashtsai! TODO for hasOne relationship, it's preferred to use join query for eager fetch + // however, also need to consider adding a 'lazy' attribute to xorm tag which allow hasOne + // property to be fetched lazily + structInter := reflect.New(fieldValue.Type()) + has, err := session.ID(pk).NoCascade().get(structInter.Interface()) + if err != nil { + return err + } + if has { + v = structInter.Elem().Interface() + fieldValue.Set(reflect.ValueOf(v)) + } else { + return errors.New("cascade obj is not exist") + } + } + } + } + case reflect.Ptr: + // !nashtsai! TODO merge duplicated codes above + //typeStr := fieldType.String() + switch fieldType.Elem().Kind() { + // case "*string": + case schemas.StringType.Kind(): + x := string(data) + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*bool": + case schemas.BoolType.Kind(): + d := string(data) + v, err := strconv.ParseBool(d) + if err != nil { + return fmt.Errorf("arg %v as bool: %s", key, err.Error()) + } + fieldValue.Set(reflect.ValueOf(&v).Convert(fieldType)) + // case "*complex64": + case schemas.Complex64Type.Kind(): + var x complex64 + if len(data) > 0 { + err := json.DefaultJSONHandler.Unmarshal(data, &x) + if err != nil { + return err + } + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + } + // case "*complex128": + case schemas.Complex128Type.Kind(): + var x complex128 + if len(data) > 0 { + err := json.DefaultJSONHandler.Unmarshal(data, &x) + if err != nil { + return err + } + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + } + // case "*float64": + case schemas.Float64Type.Kind(): + x, err := strconv.ParseFloat(string(data), 64) + if err != nil { + return fmt.Errorf("arg %v as float64: %s", key, err.Error()) + } + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*float32": + case schemas.Float32Type.Kind(): + var x float32 + x1, err := strconv.ParseFloat(string(data), 32) + if err != nil { + return fmt.Errorf("arg %v as float32: %s", key, err.Error()) + } + x = float32(x1) + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*uint64": + case schemas.Uint64Type.Kind(): + var x uint64 + x, err := strconv.ParseUint(string(data), 10, 64) + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*uint": + case schemas.UintType.Kind(): + var x uint + x1, err := strconv.ParseUint(string(data), 10, 64) + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + x = uint(x1) + 
fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*uint32": + case schemas.Uint32Type.Kind(): + var x uint32 + x1, err := strconv.ParseUint(string(data), 10, 64) + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + x = uint32(x1) + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*uint8": + case schemas.Uint8Type.Kind(): + var x uint8 + x1, err := strconv.ParseUint(string(data), 10, 64) + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + x = uint8(x1) + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*uint16": + case schemas.Uint16Type.Kind(): + var x uint16 + x1, err := strconv.ParseUint(string(data), 10, 64) + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + x = uint16(x1) + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*int64": + case schemas.Int64Type.Kind(): + sdata := string(data) + var x int64 + var err error + // for mysql, when use bit, it returned \x01 + if col.SQLType.Name == schemas.Bit && + strings.Contains(session.engine.DriverName(), "mysql") { + if len(data) == 1 { + x = int64(data[0]) + } else { + x = 0 + } + } else if strings.HasPrefix(sdata, "0x") { + x, err = strconv.ParseInt(sdata, 16, 64) + } else if strings.HasPrefix(sdata, "0") { + x, err = strconv.ParseInt(sdata, 8, 64) + } else { + x, err = strconv.ParseInt(sdata, 10, 64) + } + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*int": + case schemas.IntType.Kind(): + sdata := string(data) + var x int + var x1 int64 + var err error + // for mysql, when use bit, it returned \x01 + if col.SQLType.Name == schemas.Bit && + strings.Contains(session.engine.DriverName(), "mysql") { + if len(data) == 1 { + x = int(data[0]) + } else { + x = 0 + } + } else if strings.HasPrefix(sdata, "0x") { + x1, err = strconv.ParseInt(sdata, 16, 64) + x = int(x1) + } else if strings.HasPrefix(sdata, "0") { + x1, err = strconv.ParseInt(sdata, 8, 64) + x = int(x1) + } else { + x1, err = strconv.ParseInt(sdata, 10, 64) + x = int(x1) + } + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*int32": + case schemas.Int32Type.Kind(): + sdata := string(data) + var x int32 + var x1 int64 + var err error + // for mysql, when use bit, it returned \x01 + if col.SQLType.Name == schemas.Bit && + session.engine.dialect.URI().DBType == schemas.MYSQL { + if len(data) == 1 { + x = int32(data[0]) + } else { + x = 0 + } + } else if strings.HasPrefix(sdata, "0x") { + x1, err = strconv.ParseInt(sdata, 16, 64) + x = int32(x1) + } else if strings.HasPrefix(sdata, "0") { + x1, err = strconv.ParseInt(sdata, 8, 64) + x = int32(x1) + } else { + x1, err = strconv.ParseInt(sdata, 10, 64) + x = int32(x1) + } + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*int8": + case schemas.Int8Type.Kind(): + sdata := string(data) + var x int8 + var x1 int64 + var err error + // for mysql, when use bit, it returned \x01 + if col.SQLType.Name == schemas.Bit && + strings.Contains(session.engine.DriverName(), "mysql") { + if len(data) == 1 { + x = int8(data[0]) + } else { + x = 0 + } + } else if strings.HasPrefix(sdata, "0x") { + x1, err = strconv.ParseInt(sdata, 16, 64) + x = int8(x1) + } else if strings.HasPrefix(sdata, "0") { + x1, 
err = strconv.ParseInt(sdata, 8, 64) + x = int8(x1) + } else { + x1, err = strconv.ParseInt(sdata, 10, 64) + x = int8(x1) + } + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*int16": + case schemas.Int16Type.Kind(): + sdata := string(data) + var x int16 + var x1 int64 + var err error + // for mysql, when use bit, it returned \x01 + if col.SQLType.Name == schemas.Bit && + strings.Contains(session.engine.DriverName(), "mysql") { + if len(data) == 1 { + x = int16(data[0]) + } else { + x = 0 + } + } else if strings.HasPrefix(sdata, "0x") { + x1, err = strconv.ParseInt(sdata, 16, 64) + x = int16(x1) + } else if strings.HasPrefix(sdata, "0") { + x1, err = strconv.ParseInt(sdata, 8, 64) + x = int16(x1) + } else { + x1, err = strconv.ParseInt(sdata, 10, 64) + x = int16(x1) + } + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*SomeStruct": + case reflect.Struct: + switch fieldType { + // case "*.time.Time": + case schemas.PtrTimeType: + x, err := session.byte2Time(col, data) + if err != nil { + return err + } + v = x + fieldValue.Set(reflect.ValueOf(&x)) + default: + if session.statement.UseCascade { + structInter := reflect.New(fieldType.Elem()) + table, err := session.engine.tagParser.ParseWithCache(structInter.Elem()) + if err != nil { + return err + } + + if len(table.PrimaryKeys) > 1 { + return errors.New("unsupported composited primary key cascade") + } + + var pk = make(schemas.PK, len(table.PrimaryKeys)) + rawValueType := table.ColumnType(table.PKColumns()[0].FieldName) + pk[0], err = str2PK(string(data), rawValueType) + if err != nil { + return err + } + + if !pk.IsZero() { + // !nashtsai! TODO for hasOne relationship, it's preferred to use join query for eager fetch + // however, also need to consider adding a 'lazy' attribute to xorm tag which allow hasOne + // property to be fetched lazily + has, err := session.ID(pk).NoCascade().get(structInter.Interface()) + if err != nil { + return err + } + if has { + v = structInter.Interface() + fieldValue.Set(reflect.ValueOf(v)) + } else { + return errors.New("cascade obj is not exist") + } + } + } else { + return fmt.Errorf("unsupported struct type in Scan: %s", fieldValue.Type().String()) + } + } + default: + return fmt.Errorf("unsupported type in Scan: %s", fieldValue.Type().String()) + } + default: + return fmt.Errorf("unsupported type in Scan: %s", fieldValue.Type().String()) + } + + return nil +} diff --git a/vendor/xorm.io/xorm/session_delete.go b/vendor/xorm.io/xorm/session_delete.go new file mode 100644 index 0000000..13bf791 --- /dev/null +++ b/vendor/xorm.io/xorm/session_delete.go @@ -0,0 +1,251 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package xorm + +import ( + "errors" + "fmt" + "strconv" + + "xorm.io/xorm/caches" + "xorm.io/xorm/schemas" +) + +var ( + // ErrNeedDeletedCond delete needs less one condition error + ErrNeedDeletedCond = errors.New("Delete action needs at least one condition") + + // ErrNotImplemented not implemented + ErrNotImplemented = errors.New("Not implemented") +) + +func (session *Session) cacheDelete(table *schemas.Table, tableName, sqlStr string, args ...interface{}) error { + if table == nil || + session.tx != nil { + return ErrCacheFailed + } + + for _, filter := range session.engine.dialect.Filters() { + sqlStr = filter.Do(sqlStr) + } + + newsql := session.statement.ConvertIDSQL(sqlStr) + if newsql == "" { + return ErrCacheFailed + } + + cacher := session.engine.cacherMgr.GetCacher(tableName) + pkColumns := table.PKColumns() + ids, err := caches.GetCacheSql(cacher, tableName, newsql, args) + if err != nil { + resultsSlice, err := session.queryBytes(newsql, args...) + if err != nil { + return err + } + ids = make([]schemas.PK, 0) + if len(resultsSlice) > 0 { + for _, data := range resultsSlice { + var id int64 + var pk schemas.PK = make([]interface{}, 0) + for _, col := range pkColumns { + if v, ok := data[col.Name]; !ok { + return errors.New("no id") + } else if col.SQLType.IsText() { + pk = append(pk, string(v)) + } else if col.SQLType.IsNumeric() { + id, err = strconv.ParseInt(string(v), 10, 64) + if err != nil { + return err + } + pk = append(pk, id) + } else { + return errors.New("not supported primary key type") + } + } + ids = append(ids, pk) + } + } + } + + for _, id := range ids { + session.engine.logger.Debugf("[cache] delete cache obj: %v, %v", tableName, id) + sid, err := id.ToString() + if err != nil { + return err + } + cacher.DelBean(tableName, sid) + } + session.engine.logger.Debugf("[cache] clear cache table: %v", tableName) + cacher.ClearIds(tableName) + return nil +} + +// Delete records, bean's non-empty fields are conditions +func (session *Session) Delete(bean interface{}) (int64, error) { + if session.isAutoClose { + defer session.Close() + } + + if session.statement.LastError != nil { + return 0, session.statement.LastError + } + + if err := session.statement.SetRefBean(bean); err != nil { + return 0, err + } + + executeBeforeClosures(session, bean) + + if processor, ok := interface{}(bean).(BeforeDeleteProcessor); ok { + processor.BeforeDelete() + } + + condSQL, condArgs, err := session.statement.GenConds(bean) + if err != nil { + return 0, err + } + pLimitN := session.statement.LimitN + if len(condSQL) == 0 && (pLimitN == nil || *pLimitN == 0) { + return 0, ErrNeedDeletedCond + } + + var tableNameNoQuote = session.statement.TableName() + var tableName = session.engine.Quote(tableNameNoQuote) + var table = session.statement.RefTable + var deleteSQL string + if len(condSQL) > 0 { + deleteSQL = fmt.Sprintf("DELETE FROM %v WHERE %v", tableName, condSQL) + } else { + deleteSQL = fmt.Sprintf("DELETE FROM %v", tableName) + } + + var orderSQL string + if len(session.statement.OrderStr) > 0 { + orderSQL += fmt.Sprintf(" ORDER BY %s", session.statement.OrderStr) + } + if pLimitN != nil && *pLimitN > 0 { + limitNValue := *pLimitN + orderSQL += fmt.Sprintf(" LIMIT %d", limitNValue) + } + + if len(orderSQL) > 0 { + switch session.engine.dialect.URI().DBType { + case schemas.POSTGRES: + inSQL := fmt.Sprintf("ctid IN (SELECT ctid FROM %s%s)", tableName, orderSQL) + if len(condSQL) > 0 { + deleteSQL += " AND " + inSQL + } else { + deleteSQL += " WHERE " + inSQL + } + case 
schemas.SQLITE: + inSQL := fmt.Sprintf("rowid IN (SELECT rowid FROM %s%s)", tableName, orderSQL) + if len(condSQL) > 0 { + deleteSQL += " AND " + inSQL + } else { + deleteSQL += " WHERE " + inSQL + } + // TODO: how to handle delete limit on mssql? + case schemas.MSSQL: + return 0, ErrNotImplemented + default: + deleteSQL += orderSQL + } + } + + var realSQL string + argsForCache := make([]interface{}, 0, len(condArgs)*2) + if session.statement.GetUnscoped() || table.DeletedColumn() == nil { // tag "deleted" is disabled + realSQL = deleteSQL + copy(argsForCache, condArgs) + argsForCache = append(condArgs, argsForCache...) + } else { + // !oinume! sqlStrForCache and argsForCache is needed to behave as executing "DELETE FROM ..." for caches. + copy(argsForCache, condArgs) + argsForCache = append(condArgs, argsForCache...) + + deletedColumn := table.DeletedColumn() + realSQL = fmt.Sprintf("UPDATE %v SET %v = ? WHERE %v", + session.engine.Quote(session.statement.TableName()), + session.engine.Quote(deletedColumn.Name), + condSQL) + + if len(orderSQL) > 0 { + switch session.engine.dialect.URI().DBType { + case schemas.POSTGRES: + inSQL := fmt.Sprintf("ctid IN (SELECT ctid FROM %s%s)", tableName, orderSQL) + if len(condSQL) > 0 { + realSQL += " AND " + inSQL + } else { + realSQL += " WHERE " + inSQL + } + case schemas.SQLITE: + inSQL := fmt.Sprintf("rowid IN (SELECT rowid FROM %s%s)", tableName, orderSQL) + if len(condSQL) > 0 { + realSQL += " AND " + inSQL + } else { + realSQL += " WHERE " + inSQL + } + // TODO: how to handle delete limit on mssql? + case schemas.MSSQL: + return 0, ErrNotImplemented + default: + realSQL += orderSQL + } + } + + // !oinume! Insert nowTime to the head of session.statement.Params + condArgs = append(condArgs, "") + paramsLen := len(condArgs) + copy(condArgs[1:paramsLen], condArgs[0:paramsLen-1]) + + val, t := session.engine.nowTime(deletedColumn) + condArgs[0] = val + + var colName = deletedColumn.Name + session.afterClosures = append(session.afterClosures, func(bean interface{}) { + col := table.GetColumn(colName) + setColumnTime(bean, col, t) + }) + } + + if cacher := session.engine.GetCacher(tableNameNoQuote); cacher != nil && session.statement.UseCache { + session.cacheDelete(table, tableNameNoQuote, deleteSQL, argsForCache...) + } + + session.statement.RefTable = table + res, err := session.exec(realSQL, condArgs...) + if err != nil { + return 0, err + } + + // handle after delete processors + if session.isAutoCommit { + for _, closure := range session.afterClosures { + closure(bean) + } + if processor, ok := interface{}(bean).(AfterDeleteProcessor); ok { + processor.AfterDelete() + } + } else { + lenAfterClosures := len(session.afterClosures) + if lenAfterClosures > 0 { + if value, has := session.afterDeleteBeans[bean]; has && value != nil { + *value = append(*value, session.afterClosures...) + } else { + afterClosures := make([]func(interface{}), lenAfterClosures) + copy(afterClosures, session.afterClosures) + session.afterDeleteBeans[bean] = &afterClosures + } + } else { + if _, ok := interface{}(bean).(AfterDeleteProcessor); ok { + session.afterDeleteBeans[bean] = nil + } + } + } + cleanupProcessorsClosures(&session.afterClosures) + // -- + + return res.RowsAffected() +} diff --git a/vendor/xorm.io/xorm/session_exist.go b/vendor/xorm.io/xorm/session_exist.go new file mode 100644 index 0000000..e52c618 --- /dev/null +++ b/vendor/xorm.io/xorm/session_exist.go @@ -0,0 +1,29 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +// Exist returns true if the record exists, otherwise false +func (session *Session) Exist(bean ...interface{}) (bool, error) { + if session.isAutoClose { + defer session.Close() + } + + if session.statement.LastError != nil { + return false, session.statement.LastError + } + + sqlStr, args, err := session.statement.GenExistSQL(bean...) + if err != nil { + return false, err + } + + rows, err := session.queryRows(sqlStr, args...) + if err != nil { + return false, err + } + defer rows.Close() + + return rows.Next(), nil +} diff --git a/vendor/xorm.io/xorm/session_find.go b/vendor/xorm.io/xorm/session_find.go new file mode 100644 index 0000000..642093f --- /dev/null +++ b/vendor/xorm.io/xorm/session_find.go @@ -0,0 +1,490 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "errors" + "fmt" + "reflect" + + "xorm.io/builder" + "xorm.io/xorm/caches" + "xorm.io/xorm/internal/statements" + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" +) + +const ( + tpStruct = iota + tpNonStruct +) + +// Find retrieves records from a table; condiBean's non-empty fields are used +// as conditions. rowsSlicePtr can be []Struct, []*Struct, map[int64]Struct or +// map[int64]*Struct +func (session *Session) Find(rowsSlicePtr interface{}, condiBean ...interface{}) error { + if session.isAutoClose { + defer session.Close() + } + return session.find(rowsSlicePtr, condiBean...) +} + +// FindAndCount finds the results and also returns the total count +func (session *Session) FindAndCount(rowsSlicePtr interface{}, condiBean ...interface{}) (int64, error) { + if session.isAutoClose { + defer session.Close() + } + + session.autoResetStatement = false + err := session.find(rowsSlicePtr, condiBean...) + if err != nil { + return 0, err + } + + sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr)) + if sliceValue.Kind() != reflect.Slice && sliceValue.Kind() != reflect.Map { + return 0, errors.New("needs a pointer to a slice or a map") + } + + sliceElementType := sliceValue.Type().Elem() + if sliceElementType.Kind() == reflect.Ptr { + sliceElementType = sliceElementType.Elem() + } + session.autoResetStatement = true + + if session.statement.SelectStr != "" { + session.statement.SelectStr = "" + } + if session.statement.OrderStr != "" { + session.statement.OrderStr = "" + } + if session.statement.LimitN != nil { + session.statement.LimitN = nil + } + if session.statement.Start > 0 { + session.statement.Start = 0 + } + + // the session has stored the conditions, so we use `unscoped` to avoid duplicating them.
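+ // A hedged usage sketch (the User type is an assumption). The paging and + // ordering options are cleared above so the count covers the full result set: + // + //     var users []User + //     total, err := engine.Where("age > ?", 18).Limit(10, 0).FindAndCount(&users) + //     // len(users) <= 10, while total counts every matching row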
+ return session.Unscoped().Count(reflect.New(sliceElementType).Interface()) +} + +func (session *Session) find(rowsSlicePtr interface{}, condiBean ...interface{}) error { + defer session.resetStatement() + if session.statement.LastError != nil { + return session.statement.LastError + } + + sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr)) + var isSlice = sliceValue.Kind() == reflect.Slice + var isMap = sliceValue.Kind() == reflect.Map + if !isSlice && !isMap { + return errors.New("needs a pointer to a slice or a map") + } + + sliceElementType := sliceValue.Type().Elem() + + var tp = tpStruct + if session.statement.RefTable == nil { + if sliceElementType.Kind() == reflect.Ptr { + if sliceElementType.Elem().Kind() == reflect.Struct { + pv := reflect.New(sliceElementType.Elem()) + if err := session.statement.SetRefValue(pv); err != nil { + return err + } + } else { + tp = tpNonStruct + } + } else if sliceElementType.Kind() == reflect.Struct { + pv := reflect.New(sliceElementType) + if err := session.statement.SetRefValue(pv); err != nil { + return err + } + } else { + tp = tpNonStruct + } + } + + var ( + table = session.statement.RefTable + addedTableName = (len(session.statement.JoinStr) > 0) + autoCond builder.Cond + ) + if tp == tpStruct { + if !session.statement.NoAutoCondition && len(condiBean) > 0 { + condTable, err := session.engine.tagParser.Parse(reflect.ValueOf(condiBean[0])) + if err != nil { + return err + } + autoCond, err = session.statement.BuildConds(condTable, condiBean[0], true, true, false, true, addedTableName) + if err != nil { + return err + } + } else { + if col := table.DeletedColumn(); col != nil && !session.statement.GetUnscoped() { // tag "deleted" is enabled + autoCond = session.statement.CondDeleted(col) + } + } + } + + // if it's a map with Cols but primary key not in column list, we still need the primary key + if isMap && !session.statement.ColumnMap.IsEmpty() { + for _, k := range session.statement.RefTable.PrimaryKeys { + session.statement.ColumnMap.Add(k) + } + } + + sqlStr, args, err := session.statement.GenFindSQL(autoCond) + if err != nil { + return err + } + + if session.statement.ColumnMap.IsEmpty() && session.canCache() { + if cacher := session.engine.GetCacher(session.statement.TableName()); cacher != nil && + !session.statement.IsDistinct && + !session.statement.GetUnscoped() { + err = session.cacheFind(sliceElementType, sqlStr, rowsSlicePtr, args...) + if err != ErrCacheFailed { + return err + } + err = nil // !nashtsai! reset err to nil for ErrCacheFailed + session.engine.logger.Warnf("Cache Find Failed") + } + } + + return session.noCacheFind(table, sliceValue, sqlStr, args...) +} + +func (session *Session) noCacheFind(table *schemas.Table, containerValue reflect.Value, sqlStr string, args ...interface{}) error { + rows, err := session.queryRows(sqlStr, args...) 
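+ // Each row returned here is decoded into a freshly allocated element (see + // newElemFunc below) and then appended to the slice, or stored under its + // primary key when the container is a map.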
+ if err != nil { + return err + } + defer rows.Close() + + fields, err := rows.Columns() + if err != nil { + return err + } + + var newElemFunc func(fields []string) reflect.Value + elemType := containerValue.Type().Elem() + var isPointer bool + if elemType.Kind() == reflect.Ptr { + isPointer = true + elemType = elemType.Elem() + } + if elemType.Kind() == reflect.Ptr { + return errors.New("pointer to pointer is not supported") + } + + newElemFunc = func(fields []string) reflect.Value { + switch elemType.Kind() { + case reflect.Slice: + slice := reflect.MakeSlice(elemType, len(fields), len(fields)) + x := reflect.New(slice.Type()) + x.Elem().Set(slice) + return x + case reflect.Map: + mp := reflect.MakeMap(elemType) + x := reflect.New(mp.Type()) + x.Elem().Set(mp) + return x + } + return reflect.New(elemType) + } + + var containerValueSetFunc func(*reflect.Value, schemas.PK) error + + if containerValue.Kind() == reflect.Slice { + containerValueSetFunc = func(newValue *reflect.Value, pk schemas.PK) error { + if isPointer { + containerValue.Set(reflect.Append(containerValue, newValue.Elem().Addr())) + } else { + containerValue.Set(reflect.Append(containerValue, newValue.Elem())) + } + return nil + } + } else { + keyType := containerValue.Type().Key() + if len(table.PrimaryKeys) == 0 { + return errors.New("don't support multiple primary key's map has non-slice key type") + } + if len(table.PrimaryKeys) > 1 && keyType.Kind() != reflect.Slice { + return errors.New("don't support multiple primary key's map has non-slice key type") + } + + containerValueSetFunc = func(newValue *reflect.Value, pk schemas.PK) error { + keyValue := reflect.New(keyType) + err := convertPKToValue(table, keyValue.Interface(), pk) + if err != nil { + return err + } + if isPointer { + containerValue.SetMapIndex(keyValue.Elem(), newValue.Elem().Addr()) + } else { + containerValue.SetMapIndex(keyValue.Elem(), newValue.Elem()) + } + return nil + } + } + + if elemType.Kind() == reflect.Struct { + var newValue = newElemFunc(fields) + dataStruct := utils.ReflectValue(newValue.Interface()) + tb, err := session.engine.tagParser.ParseWithCache(dataStruct) + if err != nil { + return err + } + err = session.rows2Beans(rows, fields, tb, newElemFunc, containerValueSetFunc) + rows.Close() + if err != nil { + return err + } + return session.executeProcessors() + } + + for rows.Next() { + var newValue = newElemFunc(fields) + bean := newValue.Interface() + + switch elemType.Kind() { + case reflect.Slice: + err = rows.ScanSlice(bean) + case reflect.Map: + err = rows.ScanMap(bean) + default: + err = rows.Scan(bean) + } + + if err != nil { + return err + } + + if err := containerValueSetFunc(&newValue, nil); err != nil { + return err + } + } + return nil +} + +func convertPKToValue(table *schemas.Table, dst interface{}, pk schemas.PK) error { + cols := table.PKColumns() + if len(cols) == 1 { + return convertAssign(dst, pk[0]) + } + + dst = pk + return nil +} + +func (session *Session) cacheFind(t reflect.Type, sqlStr string, rowsSlicePtr interface{}, args ...interface{}) (err error) { + if !session.canCache() || + utils.IndexNoCase(sqlStr, "having") != -1 || + utils.IndexNoCase(sqlStr, "group by") != -1 { + return ErrCacheFailed + } + + tableName := session.statement.TableName() + cacher := session.engine.cacherMgr.GetCacher(tableName) + if cacher == nil { + return nil + } + + for _, filter := range session.engine.dialect.Filters() { + sqlStr = filter.Do(sqlStr) + } + + newsql := session.statement.ConvertIDSQL(sqlStr) + if newsql == "" { + 
return ErrCacheFailed + } + + table := session.statement.RefTable + ids, err := caches.GetCacheSql(cacher, tableName, newsql, args) + if err != nil { + rows, err := session.queryRows(newsql, args...) + if err != nil { + return err + } + defer rows.Close() + + var i int + ids = make([]schemas.PK, 0) + for rows.Next() { + i++ + if i > 500 { + session.engine.logger.Debugf("[cacheFind] ids length > 500, no cache") + return ErrCacheFailed + } + var res = make([]string, len(table.PrimaryKeys)) + err = rows.ScanSlice(&res) + if err != nil { + return err + } + var pk schemas.PK = make([]interface{}, len(table.PrimaryKeys)) + for i, col := range table.PKColumns() { + pk[i], err = col.ConvertID(res[i]) + if err != nil { + return err + } + } + + ids = append(ids, pk) + } + + session.engine.logger.Debugf("[cache] cache sql: %v, %v, %v, %v, %v", ids, tableName, sqlStr, newsql, args) + err = caches.PutCacheSql(cacher, ids, tableName, newsql, args) + if err != nil { + return err + } + } else { + session.engine.logger.Debugf("[cache] cache hit sql: %v, %v, %v, %v", tableName, sqlStr, newsql, args) + } + + sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr)) + + ididxes := make(map[string]int) + var ides []schemas.PK + var temps = make([]interface{}, len(ids)) + + for idx, id := range ids { + sid, err := id.ToString() + if err != nil { + return err + } + bean := cacher.GetBean(tableName, sid) + + // fix issue #894 + isHit := func() (ht bool) { + if bean == nil { + ht = false + return + } + ckb := reflect.ValueOf(bean).Elem().Type() + ht = ckb == t + if !ht && t.Kind() == reflect.Ptr { + ht = t.Elem() == ckb + } + return + } + if !isHit() { + ides = append(ides, id) + ididxes[sid] = idx + } else { + session.engine.logger.Debugf("[cache] cache hit bean: %v, %v, %v", tableName, id, bean) + + pk, err := table.IDOfV(reflect.ValueOf(bean)) + if err != nil { + return err + } + + xid, err := pk.ToString() + if err != nil { + return err + } + + if sid != xid { + session.engine.logger.Errorf("[cache] error cache: %v, %v, %v", xid, sid, bean) + return ErrCacheFailed + } + temps[idx] = bean + } + } + + if len(ides) > 0 { + slices := reflect.New(reflect.SliceOf(t)) + beans := slices.Interface() + + statement := session.statement + session.statement = statements.NewStatement( + session.engine.dialect, + session.engine.tagParser, + session.engine.DatabaseTZ, + ) + if len(table.PrimaryKeys) == 1 { + ff := make([]interface{}, 0, len(ides)) + for _, ie := range ides { + ff = append(ff, ie[0]) + } + + session.In("`"+table.PrimaryKeys[0]+"`", ff...) 
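+ // With a single primary key, the cache misses are refetched in one query, + // e.g. SELECT ... WHERE `id` IN (?, ?, ...); composite keys fall back to + // OR-ed per-column equality conditions below.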
+ } else { + for _, ie := range ides { + cond := builder.NewCond() + for i, name := range table.PrimaryKeys { + cond = cond.And(builder.Eq{"`" + name + "`": ie[i]}) + } + session.Or(cond) + } + } + + err = session.NoCache().Table(tableName).find(beans) + if err != nil { + return err + } + session.statement = statement + + vs := reflect.Indirect(reflect.ValueOf(beans)) + for i := 0; i < vs.Len(); i++ { + rv := vs.Index(i) + if rv.Kind() != reflect.Ptr { + rv = rv.Addr() + } + id, err := table.IDOfV(rv) + if err != nil { + return err + } + sid, err := id.ToString() + if err != nil { + return err + } + + bean := rv.Interface() + temps[ididxes[sid]] = bean + session.engine.logger.Debugf("[cache] cache bean: %v, %v, %v, %v", tableName, id, bean, temps) + cacher.PutBean(tableName, sid, bean) + } + } + + for j := 0; j < len(temps); j++ { + bean := temps[j] + if bean == nil { + session.engine.logger.Warnf("[cache] cache no hit: %v, %v, %v", tableName, ids[j], temps) + // return errors.New("cache error") // !nashtsai! no need to return error, but continue instead + continue + } + if sliceValue.Kind() == reflect.Slice { + if t.Kind() == reflect.Ptr { + sliceValue.Set(reflect.Append(sliceValue, reflect.ValueOf(bean))) + } else { + sliceValue.Set(reflect.Append(sliceValue, reflect.Indirect(reflect.ValueOf(bean)))) + } + } else if sliceValue.Kind() == reflect.Map { + var key = ids[j] + keyType := sliceValue.Type().Key() + var ikey interface{} + if len(key) == 1 { + ikey, err = str2PK(fmt.Sprintf("%v", key[0]), keyType) + if err != nil { + return err + } + } else { + if keyType.Kind() != reflect.Slice { + return errors.New("table have multiple primary keys, key is not schemas.PK or slice") + } + ikey = key + } + + if t.Kind() == reflect.Ptr { + sliceValue.SetMapIndex(reflect.ValueOf(ikey), reflect.ValueOf(bean)) + } else { + sliceValue.SetMapIndex(reflect.ValueOf(ikey), reflect.Indirect(reflect.ValueOf(bean))) + } + } + } + + return nil +} diff --git a/vendor/xorm.io/xorm/session_get.go b/vendor/xorm.io/xorm/session_get.go new file mode 100644 index 0000000..afedcd1 --- /dev/null +++ b/vendor/xorm.io/xorm/session_get.go @@ -0,0 +1,358 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package xorm + +import ( + "database/sql" + "errors" + "fmt" + "reflect" + "strconv" + + "xorm.io/xorm/caches" + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" +) + +// Get retrieve one record from database, bean's non-empty fields +// will be as conditions +func (session *Session) Get(bean interface{}) (bool, error) { + if session.isAutoClose { + defer session.Close() + } + return session.get(bean) +} + +func (session *Session) get(bean interface{}) (bool, error) { + defer session.resetStatement() + + if session.statement.LastError != nil { + return false, session.statement.LastError + } + + beanValue := reflect.ValueOf(bean) + if beanValue.Kind() != reflect.Ptr { + return false, errors.New("needs a pointer to a value") + } else if beanValue.Elem().Kind() == reflect.Ptr { + return false, errors.New("a pointer to a pointer is not allowed") + } + + if beanValue.Elem().Kind() == reflect.Struct { + if err := session.statement.SetRefBean(bean); err != nil { + return false, err + } + } + + var sqlStr string + var args []interface{} + var err error + + if session.statement.RawSQL == "" { + if len(session.statement.TableName()) <= 0 { + return false, ErrTableNotFound + } + session.statement.Limit(1) + sqlStr, args, err = session.statement.GenGetSQL(bean) + if err != nil { + return false, err + } + } else { + sqlStr = session.statement.GenRawSQL() + args = session.statement.RawParams + } + + table := session.statement.RefTable + + if session.statement.ColumnMap.IsEmpty() && session.canCache() && beanValue.Elem().Kind() == reflect.Struct { + if cacher := session.engine.GetCacher(session.statement.TableName()); cacher != nil && + !session.statement.GetUnscoped() { + has, err := session.cacheGet(bean, sqlStr, args...) + if err != ErrCacheFailed { + return has, err + } + } + } + + context := session.statement.Context + if context != nil { + res := context.Get(fmt.Sprintf("%v-%v", sqlStr, args)) + if res != nil { + session.engine.logger.Debugf("hit context cache: %s", sqlStr) + + structValue := reflect.Indirect(reflect.ValueOf(bean)) + structValue.Set(reflect.Indirect(reflect.ValueOf(res))) + session.lastSQL = "" + session.lastSQLArgs = nil + return true, nil + } + } + + has, err := session.nocacheGet(beanValue.Elem().Kind(), table, bean, sqlStr, args...) + if err != nil || !has { + return has, err + } + + if context != nil { + context.Put(fmt.Sprintf("%v-%v", sqlStr, args), bean) + } + + return true, nil +} + +func (session *Session) nocacheGet(beanKind reflect.Kind, table *schemas.Table, bean interface{}, sqlStr string, args ...interface{}) (bool, error) { + rows, err := session.queryRows(sqlStr, args...) 
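+ // For non-raw queries, get builds the statement with Limit(1), so at most + // one row is expected here; rows.Next() below doubles as the existence check.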
+ if err != nil { + return false, err + } + defer rows.Close() + + if !rows.Next() { + if rows.Err() != nil { + return false, rows.Err() + } + return false, nil + } + + switch bean.(type) { + case sql.NullInt64, sql.NullBool, sql.NullFloat64, sql.NullString: + return true, rows.Scan(&bean) + case *sql.NullInt64, *sql.NullBool, *sql.NullFloat64, *sql.NullString: + return true, rows.Scan(bean) + case *string: + var res sql.NullString + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*string)) = res.String + } + return true, nil + case *int: + var res sql.NullInt64 + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*int)) = int(res.Int64) + } + return true, nil + case *int8: + var res sql.NullInt64 + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*int8)) = int8(res.Int64) + } + return true, nil + case *int16: + var res sql.NullInt64 + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*int16)) = int16(res.Int64) + } + return true, nil + case *int32: + var res sql.NullInt64 + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*int32)) = int32(res.Int64) + } + return true, nil + case *int64: + var res sql.NullInt64 + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*int64)) = int64(res.Int64) + } + return true, nil + case *uint: + var res sql.NullInt64 + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*uint)) = uint(res.Int64) + } + return true, nil + case *uint8: + var res sql.NullInt64 + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*uint8)) = uint8(res.Int64) + } + return true, nil + case *uint16: + var res sql.NullInt64 + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*uint16)) = uint16(res.Int64) + } + return true, nil + case *uint32: + var res sql.NullInt64 + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*uint32)) = uint32(res.Int64) + } + return true, nil + case *uint64: + var res sql.NullInt64 + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*uint64)) = uint64(res.Int64) + } + return true, nil + case *bool: + var res sql.NullBool + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*bool)) = res.Bool + } + return true, nil + } + + switch beanKind { + case reflect.Struct: + fields, err := rows.Columns() + if err != nil { + // WARN: Alougth rows return true, but get fields failed + return true, err + } + + scanResults, err := session.row2Slice(rows, fields, bean) + if err != nil { + return false, err + } + // close it before convert data + rows.Close() + + dataStruct := utils.ReflectValue(bean) + _, err = session.slice2Bean(scanResults, fields, bean, &dataStruct, table) + if err != nil { + return true, err + } + + return true, session.executeProcessors() + case reflect.Slice: + err = rows.ScanSlice(bean) + case reflect.Map: + err = rows.ScanMap(bean) + case reflect.String, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + err = rows.Scan(bean) + default: + err = rows.Scan(bean) + } + + return true, err +} + +func (session *Session) cacheGet(bean interface{}, sqlStr string, args ...interface{}) 
(has bool, err error) { + // if has no reftable, then don't use cache currently + if !session.canCache() { + return false, ErrCacheFailed + } + + for _, filter := range session.engine.dialect.Filters() { + sqlStr = filter.Do(sqlStr) + } + newsql := session.statement.ConvertIDSQL(sqlStr) + if newsql == "" { + return false, ErrCacheFailed + } + + tableName := session.statement.TableName() + cacher := session.engine.cacherMgr.GetCacher(tableName) + + session.engine.logger.Debugf("[cache] Get SQL: %s, %v", newsql, args) + table := session.statement.RefTable + ids, err := caches.GetCacheSql(cacher, tableName, newsql, args) + if err != nil { + var res = make([]string, len(table.PrimaryKeys)) + rows, err := session.NoCache().queryRows(newsql, args...) + if err != nil { + return false, err + } + defer rows.Close() + + if rows.Next() { + err = rows.ScanSlice(&res) + if err != nil { + return false, err + } + } else { + return false, ErrCacheFailed + } + + var pk schemas.PK = make([]interface{}, len(table.PrimaryKeys)) + for i, col := range table.PKColumns() { + if col.SQLType.IsText() { + pk[i] = res[i] + } else if col.SQLType.IsNumeric() { + n, err := strconv.ParseInt(res[i], 10, 64) + if err != nil { + return false, err + } + pk[i] = n + } else { + return false, errors.New("unsupported") + } + } + + ids = []schemas.PK{pk} + session.engine.logger.Debugf("[cache] cache ids: %s, %v", newsql, ids) + err = caches.PutCacheSql(cacher, ids, tableName, newsql, args) + if err != nil { + return false, err + } + } else { + session.engine.logger.Debugf("[cache] cache hit: %s, %v", newsql, ids) + } + + if len(ids) > 0 { + structValue := reflect.Indirect(reflect.ValueOf(bean)) + id := ids[0] + session.engine.logger.Debugf("[cache] get bean: %s, %v", tableName, id) + sid, err := id.ToString() + if err != nil { + return false, err + } + cacheBean := cacher.GetBean(tableName, sid) + if cacheBean == nil { + cacheBean = bean + has, err = session.nocacheGet(reflect.Struct, table, cacheBean, sqlStr, args...) + if err != nil || !has { + return has, err + } + + session.engine.logger.Debugf("[cache] cache bean: %s, %v, %v", tableName, id, cacheBean) + cacher.PutBean(tableName, sid, cacheBean) + } else { + session.engine.logger.Debugf("[cache] cache hit: %s, %v, %v", tableName, id, cacheBean) + has = true + } + structValue.Set(reflect.Indirect(reflect.ValueOf(cacheBean))) + + return has, nil + } + return false, nil +} diff --git a/vendor/xorm.io/xorm/session_insert.go b/vendor/xorm.io/xorm/session_insert.go new file mode 100644 index 0000000..5f96815 --- /dev/null +++ b/vendor/xorm.io/xorm/session_insert.go @@ -0,0 +1,643 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
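For orientation, a minimal usage sketch of the `Get` path added above. The `User` struct, table, and DSN here are illustrative assumptions, not part of this patch:

```go
package main

import (
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql"
	"xorm.io/xorm"
)

// User is a hypothetical mapped struct.
type User struct {
	Id   int64
	Name string
}

func main() {
	engine, err := xorm.NewEngine("mysql", "user:pass@/dbname")
	if err != nil {
		log.Fatal(err)
	}
	// Non-empty fields of the bean become WHERE conditions; the bool
	// reports whether a matching row was found and copied into the bean.
	u := User{Name: "alice"}
	has, err := engine.Get(&u) // engine.Get opens a session and calls Session.Get
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(has, u.Id)
}
```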
+ +package xorm + +import ( + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" +) + +// ErrNoElementsOnSlice represents an error there is no element when insert +var ErrNoElementsOnSlice = errors.New("No element on slice when insert") + +// Insert insert one or more beans +func (session *Session) Insert(beans ...interface{}) (int64, error) { + var affected int64 + var err error + + if session.isAutoClose { + defer session.Close() + } + + session.autoResetStatement = false + defer func() { + session.autoResetStatement = true + session.resetStatement() + }() + + for _, bean := range beans { + switch bean.(type) { + case map[string]interface{}: + cnt, err := session.insertMapInterface(bean.(map[string]interface{})) + if err != nil { + return affected, err + } + affected += cnt + case []map[string]interface{}: + s := bean.([]map[string]interface{}) + for i := 0; i < len(s); i++ { + cnt, err := session.insertMapInterface(s[i]) + if err != nil { + return affected, err + } + affected += cnt + } + case map[string]string: + cnt, err := session.insertMapString(bean.(map[string]string)) + if err != nil { + return affected, err + } + affected += cnt + case []map[string]string: + s := bean.([]map[string]string) + for i := 0; i < len(s); i++ { + cnt, err := session.insertMapString(s[i]) + if err != nil { + return affected, err + } + affected += cnt + } + default: + sliceValue := reflect.Indirect(reflect.ValueOf(bean)) + if sliceValue.Kind() == reflect.Slice { + size := sliceValue.Len() + if size <= 0 { + return 0, ErrNoElementsOnSlice + } + + cnt, err := session.innerInsertMulti(bean) + if err != nil { + return affected, err + } + affected += cnt + } else { + cnt, err := session.innerInsert(bean) + if err != nil { + return affected, err + } + affected += cnt + } + } + } + + return affected, err +} + +func (session *Session) innerInsertMulti(rowsSlicePtr interface{}) (int64, error) { + sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr)) + if sliceValue.Kind() != reflect.Slice { + return 0, errors.New("needs a pointer to a slice") + } + + if sliceValue.Len() <= 0 { + return 0, errors.New("could not insert a empty slice") + } + + if err := session.statement.SetRefBean(sliceValue.Index(0).Interface()); err != nil { + return 0, err + } + + tableName := session.statement.TableName() + if len(tableName) <= 0 { + return 0, ErrTableNotFound + } + + var ( + table = session.statement.RefTable + size = sliceValue.Len() + colNames []string + colMultiPlaces []string + args []interface{} + cols []*schemas.Column + ) + + for i := 0; i < size; i++ { + v := sliceValue.Index(i) + var vv reflect.Value + switch v.Kind() { + case reflect.Interface: + vv = reflect.Indirect(v.Elem()) + default: + vv = reflect.Indirect(v) + } + elemValue := v.Interface() + var colPlaces []string + + // handle BeforeInsertProcessor + // !nashtsai! does user expect it's same slice to passed closure when using Before()/After() when insert multi?? 
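The type switch in `Insert` above dispatches on the bean's shape. A hedged sketch of the three call styles it accepts (the `User` struct and table name are illustrative, and the session is assumed to come from `engine.NewSession()`):

```go
func insertForms(session *xorm.Session) error {
	// struct pointer -> innerInsert
	if _, err := session.Insert(&User{Name: "alice"}); err != nil {
		return err
	}
	// pointer to a slice -> innerInsertMulti (one multi-row INSERT)
	if _, err := session.Insert(&[]User{{Name: "bob"}, {Name: "carol"}}); err != nil {
		return err
	}
	// the map form carries no type information, so the table must be set
	// explicitly, otherwise insertMapInterface returns ErrTableNotFound
	_, err := session.Table("user").Insert(map[string]interface{}{"name": "dave"})
	return err
}
```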
+ for _, closure := range session.beforeClosures { + closure(elemValue) + } + + if processor, ok := interface{}(elemValue).(BeforeInsertProcessor); ok { + processor.BeforeInsert() + } + // -- + + for _, col := range table.Columns() { + ptrFieldValue, err := col.ValueOfV(&vv) + if err != nil { + return 0, err + } + fieldValue := *ptrFieldValue + if col.IsAutoIncrement && utils.IsZero(fieldValue.Interface()) { + continue + } + if col.MapType == schemas.ONLYFROMDB { + continue + } + if col.IsDeleted { + continue + } + if session.statement.OmitColumnMap.Contain(col.Name) { + continue + } + if len(session.statement.ColumnMap) > 0 && !session.statement.ColumnMap.Contain(col.Name) { + continue + } + if (col.IsCreated || col.IsUpdated) && session.statement.UseAutoTime { + val, t := session.engine.nowTime(col) + args = append(args, val) + + var colName = col.Name + session.afterClosures = append(session.afterClosures, func(bean interface{}) { + col := table.GetColumn(colName) + setColumnTime(bean, col, t) + }) + } else if col.IsVersion && session.statement.CheckVersion { + args = append(args, 1) + var colName = col.Name + session.afterClosures = append(session.afterClosures, func(bean interface{}) { + col := table.GetColumn(colName) + setColumnInt(bean, col, 1) + }) + } else { + arg, err := session.statement.Value2Interface(col, fieldValue) + if err != nil { + return 0, err + } + args = append(args, arg) + } + + if i == 0 { + colNames = append(colNames, col.Name) + cols = append(cols, col) + } + colPlaces = append(colPlaces, "?") + } + + colMultiPlaces = append(colMultiPlaces, strings.Join(colPlaces, ", ")) + } + cleanupProcessorsClosures(&session.beforeClosures) + + quoter := session.engine.dialect.Quoter() + var sql string + colStr := quoter.Join(colNames, ",") + if session.engine.dialect.URI().DBType == schemas.ORACLE { + temp := fmt.Sprintf(") INTO %s (%v) VALUES (", + quoter.Quote(tableName), + colStr) + sql = fmt.Sprintf("INSERT ALL INTO %s (%v) VALUES (%v) SELECT 1 FROM DUAL", + quoter.Quote(tableName), + colStr, + strings.Join(colMultiPlaces, temp)) + } else { + sql = fmt.Sprintf("INSERT INTO %s (%v) VALUES (%v)", + quoter.Quote(tableName), + colStr, + strings.Join(colMultiPlaces, "),(")) + } + res, err := session.exec(sql, args...) + if err != nil { + return 0, err + } + + session.cacheInsert(tableName) + + lenAfterClosures := len(session.afterClosures) + for i := 0; i < size; i++ { + elemValue := reflect.Indirect(sliceValue.Index(i)).Addr().Interface() + + // handle AfterInsertProcessor + if session.isAutoCommit { + // !nashtsai! does user expect it's same slice to passed closure when using Before()/After() when insert multi?? + for _, closure := range session.afterClosures { + closure(elemValue) + } + if processor, ok := elemValue.(AfterInsertProcessor); ok { + processor.AfterInsert() + } + } else { + if lenAfterClosures > 0 { + if value, has := session.afterInsertBeans[elemValue]; has && value != nil { + *value = append(*value, session.afterClosures...) 
+ } else { + afterClosures := make([]func(interface{}), lenAfterClosures) + copy(afterClosures, session.afterClosures) + session.afterInsertBeans[elemValue] = &afterClosures + } + } else { + if _, ok := elemValue.(AfterInsertProcessor); ok { + session.afterInsertBeans[elemValue] = nil + } + } + } + } + + cleanupProcessorsClosures(&session.afterClosures) + return res.RowsAffected() +} + +// InsertMulti insert multiple records +func (session *Session) InsertMulti(rowsSlicePtr interface{}) (int64, error) { + if session.isAutoClose { + defer session.Close() + } + + sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr)) + if sliceValue.Kind() != reflect.Slice { + return 0, ErrPtrSliceType + } + + if sliceValue.Len() <= 0 { + return 0, ErrNoElementsOnSlice + } + + return session.innerInsertMulti(rowsSlicePtr) +} + +func (session *Session) innerInsert(bean interface{}) (int64, error) { + if err := session.statement.SetRefBean(bean); err != nil { + return 0, err + } + if len(session.statement.TableName()) <= 0 { + return 0, ErrTableNotFound + } + + // handle BeforeInsertProcessor + for _, closure := range session.beforeClosures { + closure(bean) + } + cleanupProcessorsClosures(&session.beforeClosures) // cleanup after used + + if processor, ok := interface{}(bean).(BeforeInsertProcessor); ok { + processor.BeforeInsert() + } + + var tableName = session.statement.TableName() + table := session.statement.RefTable + + colNames, args, err := session.genInsertColumns(bean) + if err != nil { + return 0, err + } + + sqlStr, args, err := session.statement.GenInsertSQL(colNames, args) + if err != nil { + return 0, err + } + + handleAfterInsertProcessorFunc := func(bean interface{}) { + if session.isAutoCommit { + for _, closure := range session.afterClosures { + closure(bean) + } + if processor, ok := interface{}(bean).(AfterInsertProcessor); ok { + processor.AfterInsert() + } + } else { + lenAfterClosures := len(session.afterClosures) + if lenAfterClosures > 0 { + if value, has := session.afterInsertBeans[bean]; has && value != nil { + *value = append(*value, session.afterClosures...) + } else { + afterClosures := make([]func(interface{}), lenAfterClosures) + copy(afterClosures, session.afterClosures) + session.afterInsertBeans[bean] = &afterClosures + } + + } else { + if _, ok := interface{}(bean).(AfterInsertProcessor); ok { + session.afterInsertBeans[bean] = nil + } + } + } + cleanupProcessorsClosures(&session.afterClosures) // cleanup after used + } + + // for postgres, many of them didn't implement lastInsertId, so we should + // implemented it ourself. + if session.engine.dialect.URI().DBType == schemas.ORACLE && len(table.AutoIncrement) > 0 { + res, err := session.queryBytes("select seq_atable.currval from dual", args...) 
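As a standalone illustration (not xorm's own code) of how `innerInsertMulti` above joins the per-row placeholder groups into a single multi-row statement on dialects that support multi-row VALUES; Oracle instead gets the `INSERT ALL ... SELECT 1 FROM DUAL` form because it lacks this syntax:

```go
package main

import (
	"fmt"
	"strings"
)

// multiInsertSQL mirrors the "(?, ?)" group assembly sketched above.
// It assumes at least one column.
func multiInsertSQL(table string, cols []string, rows int) string {
	one := "(" + strings.Repeat("?, ", len(cols)-1) + "?)"
	groups := make([]string, rows)
	for i := range groups {
		groups[i] = one
	}
	return fmt.Sprintf("INSERT INTO %s (%s) VALUES %s",
		table, strings.Join(cols, ","), strings.Join(groups, ","))
}

func main() {
	// Prints: INSERT INTO user (name,age) VALUES (?, ?),(?, ?)
	fmt.Println(multiInsertSQL("user", []string{"name", "age"}, 2))
}
```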
+ if err != nil { + return 0, err + } + + defer handleAfterInsertProcessorFunc(bean) + + session.cacheInsert(tableName) + + if table.Version != "" && session.statement.CheckVersion { + verValue, err := table.VersionColumn().ValueOf(bean) + if err != nil { + session.engine.logger.Errorf("%v", err) + } else if verValue.IsValid() && verValue.CanSet() { + session.incrVersionFieldValue(verValue) + } + } + + if len(res) < 1 { + return 0, errors.New("insert no error but not returned id") + } + + idByte := res[0][table.AutoIncrement] + id, err := strconv.ParseInt(string(idByte), 10, 64) + if err != nil || id <= 0 { + return 1, err + } + + aiValue, err := table.AutoIncrColumn().ValueOf(bean) + if err != nil { + session.engine.logger.Errorf("%v", err) + } + + if aiValue == nil || !aiValue.IsValid() || !aiValue.CanSet() { + return 1, nil + } + + aiValue.Set(int64ToIntValue(id, aiValue.Type())) + + return 1, nil + } else if len(table.AutoIncrement) > 0 && (session.engine.dialect.URI().DBType == schemas.POSTGRES || + session.engine.dialect.URI().DBType == schemas.MSSQL) { + res, err := session.queryBytes(sqlStr, args...) + + if err != nil { + return 0, err + } + defer handleAfterInsertProcessorFunc(bean) + + session.cacheInsert(tableName) + + if table.Version != "" && session.statement.CheckVersion { + verValue, err := table.VersionColumn().ValueOf(bean) + if err != nil { + session.engine.logger.Errorf("%v", err) + } else if verValue.IsValid() && verValue.CanSet() { + session.incrVersionFieldValue(verValue) + } + } + + if len(res) < 1 { + return 0, errors.New("insert successfully but not returned id") + } + + idByte := res[0][table.AutoIncrement] + id, err := strconv.ParseInt(string(idByte), 10, 64) + if err != nil || id <= 0 { + return 1, err + } + + aiValue, err := table.AutoIncrColumn().ValueOf(bean) + if err != nil { + session.engine.logger.Errorf("%v", err) + } + + if aiValue == nil || !aiValue.IsValid() || !aiValue.CanSet() { + return 1, nil + } + + aiValue.Set(int64ToIntValue(id, aiValue.Type())) + + return 1, nil + } + + res, err := session.exec(sqlStr, args...) + if err != nil { + return 0, err + } + + defer handleAfterInsertProcessorFunc(bean) + + session.cacheInsert(tableName) + + if table.Version != "" && session.statement.CheckVersion { + verValue, err := table.VersionColumn().ValueOf(bean) + if err != nil { + session.engine.logger.Errorf("%v", err) + } else if verValue.IsValid() && verValue.CanSet() { + session.incrVersionFieldValue(verValue) + } + } + + if table.AutoIncrement == "" { + return res.RowsAffected() + } + + var id int64 + id, err = res.LastInsertId() + if err != nil || id <= 0 { + return res.RowsAffected() + } + + aiValue, err := table.AutoIncrColumn().ValueOf(bean) + if err != nil { + session.engine.logger.Errorf("%v", err) + } + + if aiValue == nil || !aiValue.IsValid() || !aiValue.CanSet() { + return res.RowsAffected() + } + + aiValue.Set(int64ToIntValue(id, aiValue.Type())) + + return res.RowsAffected() +} + +// InsertOne insert only one struct into database as a record. +// The in parameter bean must a struct or a point to struct. 
The return +// parameter is inserted and error +func (session *Session) InsertOne(bean interface{}) (int64, error) { + if session.isAutoClose { + defer session.Close() + } + + return session.innerInsert(bean) +} + +func (session *Session) cacheInsert(table string) error { + if !session.statement.UseCache { + return nil + } + cacher := session.engine.cacherMgr.GetCacher(table) + if cacher == nil { + return nil + } + session.engine.logger.Debugf("[cache] clear SQL: %v", table) + cacher.ClearIds(table) + return nil +} + +// genInsertColumns generates insert needed columns +func (session *Session) genInsertColumns(bean interface{}) ([]string, []interface{}, error) { + table := session.statement.RefTable + colNames := make([]string, 0, len(table.ColumnsSeq())) + args := make([]interface{}, 0, len(table.ColumnsSeq())) + + for _, col := range table.Columns() { + if col.MapType == schemas.ONLYFROMDB { + continue + } + + if col.IsDeleted { + continue + } + + if session.statement.OmitColumnMap.Contain(col.Name) { + continue + } + + if len(session.statement.ColumnMap) > 0 && !session.statement.ColumnMap.Contain(col.Name) { + continue + } + + if session.statement.IncrColumns.IsColExist(col.Name) { + continue + } else if session.statement.DecrColumns.IsColExist(col.Name) { + continue + } else if session.statement.ExprColumns.IsColExist(col.Name) { + continue + } + + fieldValuePtr, err := col.ValueOf(bean) + if err != nil { + return nil, nil, err + } + fieldValue := *fieldValuePtr + + if col.IsAutoIncrement && utils.IsValueZero(fieldValue) { + continue + } + + // !evalphobia! set fieldValue as nil when column is nullable and zero-value + if _, ok := getFlagForColumn(session.statement.NullableMap, col); ok { + if col.Nullable && utils.IsValueZero(fieldValue) { + var nilValue *int + fieldValue = reflect.ValueOf(nilValue) + } + } + + if (col.IsCreated || col.IsUpdated) && session.statement.UseAutoTime /*&& isZero(fieldValue.Interface())*/ { + // if time is non-empty, then set to auto time + val, t := session.engine.nowTime(col) + args = append(args, val) + + var colName = col.Name + session.afterClosures = append(session.afterClosures, func(bean interface{}) { + col := table.GetColumn(colName) + setColumnTime(bean, col, t) + }) + } else if col.IsVersion && session.statement.CheckVersion { + args = append(args, 1) + } else { + arg, err := session.statement.Value2Interface(col, fieldValue) + if err != nil { + return colNames, args, err + } + args = append(args, arg) + } + + colNames = append(colNames, col.Name) + } + return colNames, args, nil +} + +func (session *Session) insertMapInterface(m map[string]interface{}) (int64, error) { + if len(m) == 0 { + return 0, ErrParamsType + } + + tableName := session.statement.TableName() + if len(tableName) <= 0 { + return 0, ErrTableNotFound + } + + var columns = make([]string, 0, len(m)) + exprs := session.statement.ExprColumns + for k := range m { + if !exprs.IsColExist(k) { + columns = append(columns, k) + } + } + sort.Strings(columns) + + var args = make([]interface{}, 0, len(m)) + for _, colName := range columns { + args = append(args, m[colName]) + } + + return session.insertMap(columns, args) +} + +func (session *Session) insertMapString(m map[string]string) (int64, error) { + if len(m) == 0 { + return 0, ErrParamsType + } + + tableName := session.statement.TableName() + if len(tableName) <= 0 { + return 0, ErrTableNotFound + } + + var columns = make([]string, 0, len(m)) + exprs := session.statement.ExprColumns + for k := range m { + if !exprs.IsColExist(k) { 
+ columns = append(columns, k) + } + } + + sort.Strings(columns) + + var args = make([]interface{}, 0, len(m)) + for _, colName := range columns { + args = append(args, m[colName]) + } + + return session.insertMap(columns, args) +} + +func (session *Session) insertMap(columns []string, args []interface{}) (int64, error) { + tableName := session.statement.TableName() + if len(tableName) <= 0 { + return 0, ErrTableNotFound + } + + sql, args, err := session.statement.GenInsertMapSQL(columns, args) + if err != nil { + return 0, err + } + + if err := session.cacheInsert(tableName); err != nil { + return 0, err + } + + res, err := session.exec(sql, args...) + if err != nil { + return 0, err + } + affected, err := res.RowsAffected() + if err != nil { + return 0, err + } + return affected, nil +} diff --git a/vendor/xorm.io/xorm/session_iterate.go b/vendor/xorm.io/xorm/session_iterate.go new file mode 100644 index 0000000..8cab8f4 --- /dev/null +++ b/vendor/xorm.io/xorm/session_iterate.go @@ -0,0 +1,105 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "reflect" + + "xorm.io/xorm/internal/utils" +) + +// IterFunc only use by Iterate +type IterFunc func(idx int, bean interface{}) error + +// Rows return sql.Rows compatible Rows obj, as a forward Iterator object for iterating record by record, bean's non-empty fields +// are conditions. +func (session *Session) Rows(bean interface{}) (*Rows, error) { + return newRows(session, bean) +} + +// Iterate record by record handle records from table, condiBeans's non-empty fields +// are conditions. beans could be []Struct, []*Struct, map[int64]Struct +// map[int64]*Struct +func (session *Session) Iterate(bean interface{}, fun IterFunc) error { + if session.isAutoClose { + defer session.Close() + } + + if session.statement.LastError != nil { + return session.statement.LastError + } + + if session.statement.BufferSize > 0 { + return session.bufferIterate(bean, fun) + } + + rows, err := session.Rows(bean) + if err != nil { + return err + } + defer rows.Close() + + i := 0 + for rows.Next() { + b := reflect.New(rows.beanType).Interface() + err = rows.Scan(b) + if err != nil { + return err + } + err = fun(i, b) + if err != nil { + return err + } + i++ + } + return err +} + +// BufferSize sets the buffersize for iterate +func (session *Session) BufferSize(size int) *Session { + session.statement.BufferSize = size + return session +} + +func (session *Session) bufferIterate(bean interface{}, fun IterFunc) error { + var bufferSize = session.statement.BufferSize + var pLimitN = session.statement.LimitN + if pLimitN != nil && bufferSize > *pLimitN { + bufferSize = *pLimitN + } + var start = session.statement.Start + v := utils.ReflectValue(bean) + sliceType := reflect.SliceOf(v.Type()) + var idx = 0 + session.autoResetStatement = false + defer func() { + session.autoResetStatement = true + }() + + for bufferSize > 0 { + slice := reflect.New(sliceType) + if err := session.NoCache().Limit(bufferSize, start).find(slice.Interface(), bean); err != nil { + return err + } + + for i := 0; i < slice.Elem().Len(); i++ { + if err := fun(idx, slice.Elem().Index(i).Addr().Interface()); err != nil { + return err + } + idx++ + } + + if bufferSize > slice.Elem().Len() { + break + } + + start = start + slice.Elem().Len() + if pLimitN != nil && start+bufferSize > *pLimitN { + bufferSize = *pLimitN - start + } + } + + return nil +} diff --git 
a/vendor/xorm.io/xorm/session_query.go b/vendor/xorm.io/xorm/session_query.go new file mode 100644 index 0000000..1213646 --- /dev/null +++ b/vendor/xorm.io/xorm/session_query.go @@ -0,0 +1,257 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "fmt" + "reflect" + "strconv" + "time" + + "xorm.io/xorm/core" + "xorm.io/xorm/schemas" +) + +// Query runs a raw sql and return records as []map[string][]byte +func (session *Session) Query(sqlOrArgs ...interface{}) ([]map[string][]byte, error) { + if session.isAutoClose { + defer session.Close() + } + + sqlStr, args, err := session.statement.GenQuerySQL(sqlOrArgs...) + if err != nil { + return nil, err + } + + return session.queryBytes(sqlStr, args...) +} + +func value2String(rawValue *reflect.Value) (str string, err error) { + aa := reflect.TypeOf((*rawValue).Interface()) + vv := reflect.ValueOf((*rawValue).Interface()) + switch aa.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + str = strconv.FormatInt(vv.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + str = strconv.FormatUint(vv.Uint(), 10) + case reflect.Float32, reflect.Float64: + str = strconv.FormatFloat(vv.Float(), 'f', -1, 64) + case reflect.String: + str = vv.String() + case reflect.Array, reflect.Slice: + switch aa.Elem().Kind() { + case reflect.Uint8: + data := rawValue.Interface().([]byte) + str = string(data) + if str == "\x00" { + str = "0" + } + default: + err = fmt.Errorf("Unsupported struct type %v", vv.Type().Name()) + } + // time type + case reflect.Struct: + if aa.ConvertibleTo(schemas.TimeType) { + str = vv.Convert(schemas.TimeType).Interface().(time.Time).Format(time.RFC3339Nano) + } else { + err = fmt.Errorf("Unsupported struct type %v", vv.Type().Name()) + } + case reflect.Bool: + str = strconv.FormatBool(vv.Bool()) + case reflect.Complex128, reflect.Complex64: + str = fmt.Sprintf("%v", vv.Complex()) + /* TODO: unsupported types below + case reflect.Map: + case reflect.Ptr: + case reflect.Uintptr: + case reflect.UnsafePointer: + case reflect.Chan, reflect.Func, reflect.Interface: + */ + default: + err = fmt.Errorf("Unsupported struct type %v", vv.Type().Name()) + } + return +} + +func row2mapStr(rows *core.Rows, fields []string) (resultsMap map[string]string, err error) { + result := make(map[string]string) + scanResultContainers := make([]interface{}, len(fields)) + for i := 0; i < len(fields); i++ { + var scanResultContainer interface{} + scanResultContainers[i] = &scanResultContainer + } + if err := rows.Scan(scanResultContainers...); err != nil { + return nil, err + } + + for ii, key := range fields { + rawValue := reflect.Indirect(reflect.ValueOf(scanResultContainers[ii])) + // if row is null then as empty string + if rawValue.Interface() == nil { + result[key] = "" + continue + } + + if data, err := value2String(&rawValue); err == nil { + result[key] = data + } else { + return nil, err + } + } + return result, nil +} + +func row2sliceStr(rows *core.Rows, fields []string) (results []string, err error) { + result := make([]string, 0, len(fields)) + scanResultContainers := make([]interface{}, len(fields)) + for i := 0; i < len(fields); i++ { + var scanResultContainer interface{} + scanResultContainers[i] = &scanResultContainer + } + if err := rows.Scan(scanResultContainers...); err != nil { + return nil, err + } + + for i := 0; i < 
len(fields); i++ { + rawValue := reflect.Indirect(reflect.ValueOf(scanResultContainers[i])) + // if row is null then as empty string + if rawValue.Interface() == nil { + result = append(result, "") + continue + } + + if data, err := value2String(&rawValue); err == nil { + result = append(result, data) + } else { + return nil, err + } + } + return result, nil +} + +func rows2Strings(rows *core.Rows) (resultsSlice []map[string]string, err error) { + fields, err := rows.Columns() + if err != nil { + return nil, err + } + for rows.Next() { + result, err := row2mapStr(rows, fields) + if err != nil { + return nil, err + } + resultsSlice = append(resultsSlice, result) + } + + return resultsSlice, nil +} + +func rows2SliceString(rows *core.Rows) (resultsSlice [][]string, err error) { + fields, err := rows.Columns() + if err != nil { + return nil, err + } + for rows.Next() { + record, err := row2sliceStr(rows, fields) + if err != nil { + return nil, err + } + resultsSlice = append(resultsSlice, record) + } + + return resultsSlice, nil +} + +// QueryString runs a raw sql and return records as []map[string]string +func (session *Session) QueryString(sqlOrArgs ...interface{}) ([]map[string]string, error) { + if session.isAutoClose { + defer session.Close() + } + + sqlStr, args, err := session.statement.GenQuerySQL(sqlOrArgs...) + if err != nil { + return nil, err + } + + rows, err := session.queryRows(sqlStr, args...) + if err != nil { + return nil, err + } + defer rows.Close() + + return rows2Strings(rows) +} + +// QuerySliceString runs a raw sql and return records as [][]string +func (session *Session) QuerySliceString(sqlOrArgs ...interface{}) ([][]string, error) { + if session.isAutoClose { + defer session.Close() + } + + sqlStr, args, err := session.statement.GenQuerySQL(sqlOrArgs...) + if err != nil { + return nil, err + } + + rows, err := session.queryRows(sqlStr, args...) + if err != nil { + return nil, err + } + defer rows.Close() + + return rows2SliceString(rows) +} + +func row2mapInterface(rows *core.Rows, fields []string) (resultsMap map[string]interface{}, err error) { + resultsMap = make(map[string]interface{}, len(fields)) + scanResultContainers := make([]interface{}, len(fields)) + for i := 0; i < len(fields); i++ { + var scanResultContainer interface{} + scanResultContainers[i] = &scanResultContainer + } + if err := rows.Scan(scanResultContainers...); err != nil { + return nil, err + } + + for ii, key := range fields { + resultsMap[key] = reflect.Indirect(reflect.ValueOf(scanResultContainers[ii])).Interface() + } + return +} + +func rows2Interfaces(rows *core.Rows) (resultsSlice []map[string]interface{}, err error) { + fields, err := rows.Columns() + if err != nil { + return nil, err + } + for rows.Next() { + result, err := row2mapInterface(rows, fields) + if err != nil { + return nil, err + } + resultsSlice = append(resultsSlice, result) + } + + return resultsSlice, nil +} + +// QueryInterface runs a raw sql and return records as []map[string]interface{} +func (session *Session) QueryInterface(sqlOrArgs ...interface{}) ([]map[string]interface{}, error) { + if session.isAutoClose { + defer session.Close() + } + + sqlStr, args, err := session.statement.GenQuerySQL(sqlOrArgs...) + if err != nil { + return nil, err + } + + rows, err := session.queryRows(sqlStr, args...) 
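The four raw-query helpers in this file differ only in how they shape the result set; a short usage sketch (SQL text and table are illustrative):

```go
func queryForms(session *xorm.Session) error {
	// []map[string][]byte: raw bytes per column
	if _, err := session.Query("SELECT id, name FROM user WHERE id = ?", 1); err != nil {
		return err
	}
	// []map[string]string: every value rendered via value2String
	if _, err := session.QueryString("SELECT id, name FROM user"); err != nil {
		return err
	}
	// [][]string: positional values, one slice per row
	if _, err := session.QuerySliceString("SELECT id, name FROM user"); err != nil {
		return err
	}
	// []map[string]interface{}: driver-native values, untouched
	_, err := session.QueryInterface("SELECT id, name FROM user")
	return err
}
```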
+ if err != nil { + return nil, err + } + defer rows.Close() + + return rows2Interfaces(rows) +} diff --git a/vendor/xorm.io/xorm/session_raw.go b/vendor/xorm.io/xorm/session_raw.go new file mode 100644 index 0000000..4cfe297 --- /dev/null +++ b/vendor/xorm.io/xorm/session_raw.go @@ -0,0 +1,180 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "database/sql" + "reflect" + + "xorm.io/xorm/core" +) + +func (session *Session) queryPreprocess(sqlStr *string, paramStr ...interface{}) { + for _, filter := range session.engine.dialect.Filters() { + *sqlStr = filter.Do(*sqlStr) + } + + session.lastSQL = *sqlStr + session.lastSQLArgs = paramStr +} + +func (session *Session) queryRows(sqlStr string, args ...interface{}) (*core.Rows, error) { + defer session.resetStatement() + if session.statement.LastError != nil { + return nil, session.statement.LastError + } + + session.queryPreprocess(&sqlStr, args...) + + session.lastSQL = sqlStr + session.lastSQLArgs = args + + if session.isAutoCommit { + var db *core.DB + if session.sessionType == groupSession { + db = session.engine.engineGroup.Slave().DB() + } else { + db = session.DB() + } + + if session.prepareStmt { + // don't clear stmt since session will cache them + stmt, err := session.doPrepare(db, sqlStr) + if err != nil { + return nil, err + } + + rows, err := stmt.QueryContext(session.ctx, args...) + if err != nil { + return nil, err + } + return rows, nil + } + + rows, err := db.QueryContext(session.ctx, sqlStr, args...) + if err != nil { + return nil, err + } + return rows, nil + } + + rows, err := session.tx.QueryContext(session.ctx, sqlStr, args...) + if err != nil { + return nil, err + } + return rows, nil +} + +func (session *Session) queryRow(sqlStr string, args ...interface{}) *core.Row { + return core.NewRow(session.queryRows(sqlStr, args...)) +} + +func value2Bytes(rawValue *reflect.Value) ([]byte, error) { + str, err := value2String(rawValue) + if err != nil { + return nil, err + } + return []byte(str), nil +} + +func row2map(rows *core.Rows, fields []string) (resultsMap map[string][]byte, err error) { + result := make(map[string][]byte) + scanResultContainers := make([]interface{}, len(fields)) + for i := 0; i < len(fields); i++ { + var scanResultContainer interface{} + scanResultContainers[i] = &scanResultContainer + } + if err := rows.Scan(scanResultContainers...); err != nil { + return nil, err + } + + for ii, key := range fields { + rawValue := reflect.Indirect(reflect.ValueOf(scanResultContainers[ii])) + //if row is null then ignore + if rawValue.Interface() == nil { + result[key] = []byte{} + continue + } + + if data, err := value2Bytes(&rawValue); err == nil { + result[key] = data + } else { + return nil, err // !nashtsai! REVIEW, should return err or just error log? + } + } + return result, nil +} + +func rows2maps(rows *core.Rows) (resultsSlice []map[string][]byte, err error) { + fields, err := rows.Columns() + if err != nil { + return nil, err + } + for rows.Next() { + result, err := row2map(rows, fields) + if err != nil { + return nil, err + } + resultsSlice = append(resultsSlice, result) + } + + return resultsSlice, nil +} + +func (session *Session) queryBytes(sqlStr string, args ...interface{}) ([]map[string][]byte, error) { + rows, err := session.queryRows(sqlStr, args...) 
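The write-side counterpart is `Exec`, routed through the `exec` helper defined below; a usage sketch (the statement text is illustrative):

```go
func execForm(session *xorm.Session) (int64, error) {
	// Exec runs raw SQL. Inside a transaction it goes through session.tx;
	// otherwise it uses the engine's DB, optionally via a cached prepared
	// statement when prepareStmt is enabled.
	res, err := session.Exec("UPDATE user SET name = ? WHERE id = ?", "alice", 1)
	if err != nil {
		return 0, err
	}
	return res.RowsAffected()
}
```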
+ if err != nil { + return nil, err + } + defer rows.Close() + + return rows2maps(rows) +} + +func (session *Session) exec(sqlStr string, args ...interface{}) (sql.Result, error) { + defer session.resetStatement() + + session.queryPreprocess(&sqlStr, args...) + + session.lastSQL = sqlStr + session.lastSQLArgs = args + + if !session.isAutoCommit { + return session.tx.ExecContext(session.ctx, sqlStr, args...) + } + + if session.prepareStmt { + stmt, err := session.doPrepare(session.DB(), sqlStr) + if err != nil { + return nil, err + } + + res, err := stmt.ExecContext(session.ctx, args...) + if err != nil { + return nil, err + } + return res, nil + } + + return session.DB().ExecContext(session.ctx, sqlStr, args...) +} + +// Exec raw sql +func (session *Session) Exec(sqlOrArgs ...interface{}) (sql.Result, error) { + if session.isAutoClose { + defer session.Close() + } + + if len(sqlOrArgs) == 0 { + return nil, ErrUnSupportedType + } + + sqlStr, args, err := session.statement.ConvertSQLOrArgs(sqlOrArgs...) + if err != nil { + return nil, err + } + + return session.exec(sqlStr, args...) +} diff --git a/vendor/xorm.io/xorm/session_schema.go b/vendor/xorm.io/xorm/session_schema.go new file mode 100644 index 0000000..9ccf8ab --- /dev/null +++ b/vendor/xorm.io/xorm/session_schema.go @@ -0,0 +1,490 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "bufio" + "database/sql" + "fmt" + "io" + "os" + "strings" + + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" +) + +// Ping test if database is ok +func (session *Session) Ping() error { + if session.isAutoClose { + defer session.Close() + } + + session.engine.logger.Infof("PING DATABASE %v", session.engine.DriverName()) + return session.DB().PingContext(session.ctx) +} + +// CreateTable create a table according a bean +func (session *Session) CreateTable(bean interface{}) error { + if session.isAutoClose { + defer session.Close() + } + + return session.createTable(bean) +} + +func (session *Session) createTable(bean interface{}) error { + if err := session.statement.SetRefBean(bean); err != nil { + return err + } + + sqlStrs := session.statement.GenCreateTableSQL() + for _, s := range sqlStrs { + _, err := session.exec(s) + if err != nil { + return err + } + } + return nil +} + +// CreateIndexes create indexes +func (session *Session) CreateIndexes(bean interface{}) error { + if session.isAutoClose { + defer session.Close() + } + + return session.createIndexes(bean) +} + +func (session *Session) createIndexes(bean interface{}) error { + if err := session.statement.SetRefBean(bean); err != nil { + return err + } + + sqls := session.statement.GenIndexSQL() + for _, sqlStr := range sqls { + _, err := session.exec(sqlStr) + if err != nil { + return err + } + } + return nil +} + +// CreateUniques create uniques +func (session *Session) CreateUniques(bean interface{}) error { + if session.isAutoClose { + defer session.Close() + } + return session.createUniques(bean) +} + +func (session *Session) createUniques(bean interface{}) error { + if err := session.statement.SetRefBean(bean); err != nil { + return err + } + + sqls := session.statement.GenUniqueSQL() + for _, sqlStr := range sqls { + _, err := session.exec(sqlStr) + if err != nil { + return err + } + } + return nil +} + +// DropIndexes drop indexes +func (session *Session) DropIndexes(bean interface{}) error { + if session.isAutoClose { + defer 
session.Close() + } + + return session.dropIndexes(bean) +} + +func (session *Session) dropIndexes(bean interface{}) error { + if err := session.statement.SetRefBean(bean); err != nil { + return err + } + + sqls := session.statement.GenDelIndexSQL() + for _, sqlStr := range sqls { + _, err := session.exec(sqlStr) + if err != nil { + return err + } + } + return nil +} + +// DropTable drop table will drop table if exist, if drop failed, it will return error +func (session *Session) DropTable(beanOrTableName interface{}) error { + if session.isAutoClose { + defer session.Close() + } + + return session.dropTable(beanOrTableName) +} + +func (session *Session) dropTable(beanOrTableName interface{}) error { + tableName := session.engine.TableName(beanOrTableName) + sqlStr, checkIfExist := session.engine.dialect.DropTableSQL(session.engine.TableName(tableName, true)) + if !checkIfExist { + exist, err := session.engine.dialect.IsTableExist(session.getQueryer(), session.ctx, tableName) + if err != nil { + return err + } + checkIfExist = exist + } + + if checkIfExist { + _, err := session.exec(sqlStr) + return err + } + return nil +} + +// IsTableExist if a table is exist +func (session *Session) IsTableExist(beanOrTableName interface{}) (bool, error) { + if session.isAutoClose { + defer session.Close() + } + + tableName := session.engine.TableName(beanOrTableName) + + return session.isTableExist(tableName) +} + +func (session *Session) isTableExist(tableName string) (bool, error) { + return session.engine.dialect.IsTableExist(session.getQueryer(), session.ctx, tableName) +} + +// IsTableEmpty if table have any records +func (session *Session) IsTableEmpty(bean interface{}) (bool, error) { + if session.isAutoClose { + defer session.Close() + } + return session.isTableEmpty(session.engine.TableName(bean)) +} + +func (session *Session) isTableEmpty(tableName string) (bool, error) { + var total int64 + sqlStr := fmt.Sprintf("select count(*) from %s", session.engine.Quote(session.engine.TableName(tableName, true))) + err := session.queryRow(sqlStr).Scan(&total) + if err != nil { + if err == sql.ErrNoRows { + err = nil + } + return true, err + } + + return total == 0, nil +} + +// find if index is exist according cols +func (session *Session) isIndexExist2(tableName string, cols []string, unique bool) (bool, error) { + indexes, err := session.engine.dialect.GetIndexes(session.getQueryer(), session.ctx, tableName) + if err != nil { + return false, err + } + + for _, index := range indexes { + if utils.SliceEq(index.Cols, cols) { + if unique { + return index.Type == schemas.UniqueType, nil + } + return index.Type == schemas.IndexType, nil + } + } + return false, nil +} + +func (session *Session) addColumn(colName string) error { + col := session.statement.RefTable.GetColumn(colName) + sql := session.engine.dialect.AddColumnSQL(session.statement.TableName(), col) + _, err := session.exec(sql) + return err +} + +func (session *Session) addIndex(tableName, idxName string) error { + index := session.statement.RefTable.Indexes[idxName] + sqlStr := session.engine.dialect.CreateIndexSQL(tableName, index) + _, err := session.exec(sqlStr) + return err +} + +func (session *Session) addUnique(tableName, uqeName string) error { + index := session.statement.RefTable.Indexes[uqeName] + sqlStr := session.engine.dialect.CreateIndexSQL(tableName, index) + _, err := session.exec(sqlStr) + return err +} + +// Sync2 synchronize structs to database tables +func (session *Session) Sync2(beans ...interface{}) error { + engine := 
session.engine + + if session.isAutoClose { + session.isAutoClose = false + defer session.Close() + } + + tables, err := engine.dialect.GetTables(session.getQueryer(), session.ctx) + if err != nil { + return err + } + + session.autoResetStatement = false + defer func() { + session.autoResetStatement = true + session.resetStatement() + }() + + for _, bean := range beans { + v := utils.ReflectValue(bean) + table, err := engine.tagParser.ParseWithCache(v) + if err != nil { + return err + } + var tbName string + if len(session.statement.AltTableName) > 0 { + tbName = session.statement.AltTableName + } else { + tbName = engine.TableName(bean) + } + tbNameWithSchema := engine.tbNameWithSchema(tbName) + + var oriTable *schemas.Table + for _, tb := range tables { + if strings.EqualFold(engine.tbNameWithSchema(tb.Name), engine.tbNameWithSchema(tbName)) { + oriTable = tb + break + } + } + + // this is a new table + if oriTable == nil { + err = session.StoreEngine(session.statement.StoreEngine).createTable(bean) + if err != nil { + return err + } + + err = session.createUniques(bean) + if err != nil { + return err + } + + err = session.createIndexes(bean) + if err != nil { + return err + } + continue + } + + // this will modify an old table + if err = engine.loadTableInfo(oriTable); err != nil { + return err + } + + // check columns + for _, col := range table.Columns() { + var oriCol *schemas.Column + for _, col2 := range oriTable.Columns() { + if strings.EqualFold(col.Name, col2.Name) { + oriCol = col2 + break + } + } + + // column is not exist on table + if oriCol == nil { + session.statement.RefTable = table + session.statement.SetTableName(tbNameWithSchema) + if err = session.addColumn(col.Name); err != nil { + return err + } + continue + } + + err = nil + expectedType := engine.dialect.SQLType(col) + curType := engine.dialect.SQLType(oriCol) + if expectedType != curType { + if expectedType == schemas.Text && + strings.HasPrefix(curType, schemas.Varchar) { + // currently only support mysql & postgres + if engine.dialect.URI().DBType == schemas.MYSQL || + engine.dialect.URI().DBType == schemas.POSTGRES { + engine.logger.Infof("Table %s column %s change type from %s to %s\n", + tbNameWithSchema, col.Name, curType, expectedType) + _, err = session.exec(engine.dialect.ModifyColumnSQL(tbNameWithSchema, col)) + } else { + engine.logger.Warnf("Table %s column %s db type is %s, struct type is %s\n", + tbNameWithSchema, col.Name, curType, expectedType) + } + } else if strings.HasPrefix(curType, schemas.Varchar) && strings.HasPrefix(expectedType, schemas.Varchar) { + if engine.dialect.URI().DBType == schemas.MYSQL { + if oriCol.Length < col.Length { + engine.logger.Infof("Table %s column %s change type from varchar(%d) to varchar(%d)\n", + tbNameWithSchema, col.Name, oriCol.Length, col.Length) + _, err = session.exec(engine.dialect.ModifyColumnSQL(tbNameWithSchema, col)) + } + } + } else { + if !(strings.HasPrefix(curType, expectedType) && curType[len(expectedType)] == '(') { + engine.logger.Warnf("Table %s column %s db type is %s, struct type is %s", + tbNameWithSchema, col.Name, curType, expectedType) + } + } + } else if expectedType == schemas.Varchar { + if engine.dialect.URI().DBType == schemas.MYSQL { + if oriCol.Length < col.Length { + engine.logger.Infof("Table %s column %s change type from varchar(%d) to varchar(%d)\n", + tbNameWithSchema, col.Name, oriCol.Length, col.Length) + _, err = session.exec(engine.dialect.ModifyColumnSQL(tbNameWithSchema, col)) + } + } + } + + if col.Default != 
oriCol.Default { + switch { + case col.IsAutoIncrement: // For autoincrement column, don't check default + case (col.SQLType.Name == schemas.Bool || col.SQLType.Name == schemas.Boolean) && + ((strings.EqualFold(col.Default, "true") && oriCol.Default == "1") || + (strings.EqualFold(col.Default, "false") && oriCol.Default == "0")): + default: + engine.logger.Warnf("Table %s Column %s db default is %s, struct default is %s", + tbName, col.Name, oriCol.Default, col.Default) + } + } + if col.Nullable != oriCol.Nullable { + engine.logger.Warnf("Table %s Column %s db nullable is %v, struct nullable is %v", + tbName, col.Name, oriCol.Nullable, col.Nullable) + } + + if err != nil { + return err + } + } + + var foundIndexNames = make(map[string]bool) + var addedNames = make(map[string]*schemas.Index) + + for name, index := range table.Indexes { + var oriIndex *schemas.Index + for name2, index2 := range oriTable.Indexes { + if index.Equal(index2) { + oriIndex = index2 + foundIndexNames[name2] = true + break + } + } + + if oriIndex != nil { + if oriIndex.Type != index.Type { + sql := engine.dialect.DropIndexSQL(tbNameWithSchema, oriIndex) + _, err = session.exec(sql) + if err != nil { + return err + } + oriIndex = nil + } + } + + if oriIndex == nil { + addedNames[name] = index + } + } + + for name2, index2 := range oriTable.Indexes { + if _, ok := foundIndexNames[name2]; !ok { + sql := engine.dialect.DropIndexSQL(tbNameWithSchema, index2) + _, err = session.exec(sql) + if err != nil { + return err + } + } + } + + for name, index := range addedNames { + if index.Type == schemas.UniqueType { + session.statement.RefTable = table + session.statement.SetTableName(tbNameWithSchema) + err = session.addUnique(tbNameWithSchema, name) + } else if index.Type == schemas.IndexType { + session.statement.RefTable = table + session.statement.SetTableName(tbNameWithSchema) + err = session.addIndex(tbNameWithSchema, name) + } + if err != nil { + return err + } + } + + // check all the columns which removed from struct fields but left on database tables. + for _, colName := range oriTable.ColumnsSeq() { + if table.GetColumn(colName) == nil { + engine.logger.Warnf("Table %s has column %s but struct has not related field", engine.TableName(oriTable.Name, true), colName) + } + } + } + + return nil +} + +// ImportFile SQL DDL file +func (session *Session) ImportFile(ddlPath string) ([]sql.Result, error) { + file, err := os.Open(ddlPath) + if err != nil { + return nil, err + } + defer file.Close() + return session.Import(file) +} + +// Import SQL DDL from io.Reader +func (session *Session) Import(r io.Reader) ([]sql.Result, error) { + var results []sql.Result + var lastError error + scanner := bufio.NewScanner(r) + + var inSingleQuote bool + semiColSpliter := func(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + for i, b := range data { + if b == '\'' { + inSingleQuote = !inSingleQuote + } + if !inSingleQuote && b == ';' { + return i + 1, data[0:i], nil + } + } + // If we're at EOF, we have a final, non-terminated line. Return it. + if atEOF { + return len(data), data, nil + } + // Request more data. 
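+	// Returning (advance=0, token=nil, err=nil) tells bufio.Scanner that no
+	// complete statement is buffered yet, e.g. while still inside a quoted
+	// literal such as ('a;b'), so it reads more input before splitting again.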
+ return 0, nil, nil + } + + scanner.Split(semiColSpliter) + + for scanner.Scan() { + query := strings.Trim(scanner.Text(), " \t\n\r") + if len(query) > 0 { + result, err := session.Exec(query) + results = append(results, result) + if err != nil { + return nil, err + } + } + } + + return results, lastError +} diff --git a/vendor/xorm.io/xorm/session_stats.go b/vendor/xorm.io/xorm/session_stats.go new file mode 100644 index 0000000..17d0a67 --- /dev/null +++ b/vendor/xorm.io/xorm/session_stats.go @@ -0,0 +1,81 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "database/sql" + "errors" + "reflect" +) + +// Count counts the records. bean's non-empty fields +// are conditions. +func (session *Session) Count(bean ...interface{}) (int64, error) { + if session.isAutoClose { + defer session.Close() + } + + sqlStr, args, err := session.statement.GenCountSQL(bean...) + if err != nil { + return 0, err + } + + var total int64 + err = session.queryRow(sqlStr, args...).Scan(&total) + if err == sql.ErrNoRows || err == nil { + return total, nil + } + + return 0, err +} + +// sum call sum some column. bean's non-empty fields are conditions. +func (session *Session) sum(res interface{}, bean interface{}, columnNames ...string) error { + if session.isAutoClose { + defer session.Close() + } + + v := reflect.ValueOf(res) + if v.Kind() != reflect.Ptr { + return errors.New("need a pointer to a variable") + } + + sqlStr, args, err := session.statement.GenSumSQL(bean, columnNames...) + if err != nil { + return err + } + + if v.Elem().Kind() == reflect.Slice { + err = session.queryRow(sqlStr, args...).ScanSlice(res) + } else { + err = session.queryRow(sqlStr, args...).Scan(res) + } + if err == sql.ErrNoRows || err == nil { + return nil + } + return err +} + +// Sum call sum some column. bean's non-empty fields are conditions. +func (session *Session) Sum(bean interface{}, columnName string) (res float64, err error) { + return res, session.sum(&res, bean, columnName) +} + +// SumInt call sum some column. bean's non-empty fields are conditions. +func (session *Session) SumInt(bean interface{}, columnName string) (res int64, err error) { + return res, session.sum(&res, bean, columnName) +} + +// Sums call sum some columns. bean's non-empty fields are conditions. +func (session *Session) Sums(bean interface{}, columnNames ...string) ([]float64, error) { + var res = make([]float64, len(columnNames), len(columnNames)) + return res, session.sum(&res, bean, columnNames...) +} + +// SumsInt sum specify columns and return as []int64 instead of []float64 +func (session *Session) SumsInt(bean interface{}, columnNames ...string) ([]int64, error) { + var res = make([]int64, len(columnNames), len(columnNames)) + return res, session.sum(&res, bean, columnNames...) +} diff --git a/vendor/xorm.io/xorm/session_tx.go b/vendor/xorm.io/xorm/session_tx.go new file mode 100644 index 0000000..5779170 --- /dev/null +++ b/vendor/xorm.io/xorm/session_tx.go @@ -0,0 +1,86 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
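Two quick usage sketches: the aggregate helpers from session_stats.go above, and the manual transaction flow implemented in session_tx.go just below (the `User` table and engine are illustrative assumptions):

```go
func statsAndTx(engine *xorm.Engine) error {
	session := engine.NewSession()
	defer session.Close()

	// Count and Sum use the bean's non-empty fields as conditions.
	total, err := session.Count(new(User))
	if err != nil {
		return err
	}
	_ = total

	// Manual transaction: Begin, do work, then Commit or Rollback.
	if err := session.Begin(); err != nil {
		return err
	}
	if _, err := session.Insert(&User{Name: "eve"}); err != nil {
		session.Rollback()
		return err
	}
	// After-insert processors deferred during the transaction run on Commit.
	return session.Commit()
}
```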
+ +package xorm + +// Begin a transaction +func (session *Session) Begin() error { + if session.isAutoCommit { + tx, err := session.DB().BeginTx(session.ctx, nil) + if err != nil { + return err + } + session.isAutoCommit = false + session.isCommitedOrRollbacked = false + session.tx = tx + + session.saveLastSQL("BEGIN TRANSACTION") + } + return nil +} + +// Rollback When using transaction, you can rollback if any error +func (session *Session) Rollback() error { + if !session.isAutoCommit && !session.isCommitedOrRollbacked { + session.saveLastSQL("ROLL BACK") + session.isCommitedOrRollbacked = true + session.isAutoCommit = true + + return session.tx.Rollback() + } + return nil +} + +// Commit When using transaction, Commit will commit all operations. +func (session *Session) Commit() error { + if !session.isAutoCommit && !session.isCommitedOrRollbacked { + session.saveLastSQL("COMMIT") + session.isCommitedOrRollbacked = true + session.isAutoCommit = true + + if err := session.tx.Commit(); err != nil { + return err + } + + // handle processors after tx committed + closureCallFunc := func(closuresPtr *[]func(interface{}), bean interface{}) { + if closuresPtr != nil { + for _, closure := range *closuresPtr { + closure(bean) + } + } + } + + for bean, closuresPtr := range session.afterInsertBeans { + closureCallFunc(closuresPtr, bean) + + if processor, ok := interface{}(bean).(AfterInsertProcessor); ok { + processor.AfterInsert() + } + } + for bean, closuresPtr := range session.afterUpdateBeans { + closureCallFunc(closuresPtr, bean) + + if processor, ok := interface{}(bean).(AfterUpdateProcessor); ok { + processor.AfterUpdate() + } + } + for bean, closuresPtr := range session.afterDeleteBeans { + closureCallFunc(closuresPtr, bean) + + if processor, ok := interface{}(bean).(AfterDeleteProcessor); ok { + processor.AfterDelete() + } + } + cleanUpFunc := func(slices *map[interface{}]*[]func(interface{})) { + if len(*slices) > 0 { + *slices = make(map[interface{}]*[]func(interface{}), 0) + } + } + cleanUpFunc(&session.afterInsertBeans) + cleanUpFunc(&session.afterUpdateBeans) + cleanUpFunc(&session.afterDeleteBeans) + } + return nil +} diff --git a/vendor/xorm.io/xorm/session_update.go b/vendor/xorm.io/xorm/session_update.go new file mode 100644 index 0000000..7df8c75 --- /dev/null +++ b/vendor/xorm.io/xorm/session_update.go @@ -0,0 +1,532 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + + "xorm.io/builder" + "xorm.io/xorm/caches" + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" +) + +func (session *Session) cacheUpdate(table *schemas.Table, tableName, sqlStr string, args ...interface{}) error { + if table == nil || + session.tx != nil { + return ErrCacheFailed + } + + oldhead, newsql := session.statement.ConvertUpdateSQL(sqlStr) + if newsql == "" { + return ErrCacheFailed + } + for _, filter := range session.engine.dialect.Filters() { + newsql = filter.Do(newsql) + } + session.engine.logger.Debugf("[cache] new sql: %v, %v", oldhead, newsql) + + var nStart int + if len(args) > 0 { + if strings.Index(sqlStr, "?") > -1 { + nStart = strings.Count(oldhead, "?") + } else { + // only for pq, TODO: if any other databse? 
+			nStart = strings.Count(oldhead, "$")
+		}
+	}
+
+	cacher := session.engine.GetCacher(tableName)
+	session.engine.logger.Debugf("[cache] get cache sql: %v, %v", newsql, args[nStart:])
+	ids, err := caches.GetCacheSql(cacher, tableName, newsql, args[nStart:])
+	if err != nil {
+		rows, err := session.NoCache().queryRows(newsql, args[nStart:]...)
+		if err != nil {
+			return err
+		}
+		defer rows.Close()
+
+		ids = make([]schemas.PK, 0)
+		for rows.Next() {
+			var res = make([]string, len(table.PrimaryKeys))
+			err = rows.ScanSlice(&res)
+			if err != nil {
+				return err
+			}
+			var pk schemas.PK = make([]interface{}, len(table.PrimaryKeys))
+			for i, col := range table.PKColumns() {
+				if col.SQLType.IsNumeric() {
+					n, err := strconv.ParseInt(res[i], 10, 64)
+					if err != nil {
+						return err
+					}
+					pk[i] = n
+				} else if col.SQLType.IsText() {
+					pk[i] = res[i]
+				} else {
+					return errors.New("not supported")
+				}
+			}
+
+			ids = append(ids, pk)
+		}
+		session.engine.logger.Debugf("[cache] find updated id: %v", ids)
+	} /*else {
+		session.engine.LogDebug("[xorm:cacheUpdate] del cached sql:", tableName, newsql, args)
+		cacher.DelIds(tableName, genSqlKey(newsql, args))
+	}*/
+
+	for _, id := range ids {
+		sid, err := id.ToString()
+		if err != nil {
+			return err
+		}
+		if bean := cacher.GetBean(tableName, sid); bean != nil {
+			sqls := utils.SplitNNoCase(sqlStr, "where", 2)
+			if len(sqls) == 0 || len(sqls) > 2 {
+				return ErrCacheFailed
+			}
+
+			sqls = utils.SplitNNoCase(sqls[0], "set", 2)
+			if len(sqls) != 2 {
+				return ErrCacheFailed
+			}
+			kvs := strings.Split(strings.TrimSpace(sqls[1]), ",")
+
+			for idx, kv := range kvs {
+				sps := strings.SplitN(kv, "=", 2)
+				sps2 := strings.Split(sps[0], ".")
+				colName := sps2[len(sps2)-1]
+				colName = session.engine.dialect.Quoter().Trim(colName)
+				colName = schemas.CommonQuoter.Trim(colName)
+
+				if col := table.GetColumn(colName); col != nil {
+					fieldValue, err := col.ValueOf(bean)
+					if err != nil {
+						session.engine.logger.Errorf("%v", err)
+					} else {
+						session.engine.logger.Debugf("[cache] set bean field: %v, %v, %v", bean, colName, fieldValue.Interface())
+						if col.IsVersion && session.statement.CheckVersion {
+							session.incrVersionFieldValue(fieldValue)
+						} else {
+							fieldValue.Set(reflect.ValueOf(args[idx]))
+						}
+					}
+				} else {
+					session.engine.logger.Errorf("[cache] ERROR: column %v is not table %v's",
+						colName, table.Name)
+				}
+			}
+
+			session.engine.logger.Debugf("[cache] update cache: %v, %v, %v", tableName, id, bean)
+			cacher.PutBean(tableName, sid, bean)
+		}
+	}
+	session.engine.logger.Debugf("[cache] clear cached table sql: %v", tableName)
+	cacher.ClearIds(tableName)
+	return nil
+}
+
+// Update updates records. The bean's non-empty fields are the updated
+// contents; condiBean's non-empty fields are the conditions.
+// CAUTION:
+// 1. bool fields are treated as updated content by default, not as
+// conditions. You should call UseBool to use a bool as a condition.
+// 2. float32 & float64 may be inexact when used as conditions
+func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int64, error) {
+	if session.isAutoClose {
+		defer session.Close()
+	}
+
+	if session.statement.LastError != nil {
+		return 0, session.statement.LastError
+	}
+
+	v := utils.ReflectValue(bean)
+	t := v.Type()
+
+	var colNames []string
+	var args []interface{}
+
+	// handle before update processors
+	for _, closure := range session.beforeClosures {
+		closure(bean)
+	}
+	cleanupProcessorsClosures(&session.beforeClosures) // cleanup after use
+	if processor, ok := interface{}(bean).(BeforeUpdateProcessor); ok {
+		processor.BeforeUpdate()
+	}
+	// --
+
+	var err error
+	var isMap = t.Kind() == reflect.Map
+	var isStruct = t.Kind() == reflect.Struct
+	if isStruct {
+		if err := session.statement.SetRefBean(bean); err != nil {
+			return 0, err
+		}
+
+		if len(session.statement.TableName()) <= 0 {
+			return 0, ErrTableNotFound
+		}
+
+		if session.statement.ColumnStr() == "" {
+			colNames, args, err = session.statement.BuildUpdates(v, false, false,
+				false, false, true)
+		} else {
+			colNames, args, err = session.genUpdateColumns(bean)
+		}
+		if err != nil {
+			return 0, err
+		}
+	} else if isMap {
+		colNames = make([]string, 0)
+		args = make([]interface{}, 0)
+		bValue := reflect.Indirect(reflect.ValueOf(bean))
+
+		for _, v := range bValue.MapKeys() {
+			colNames = append(colNames, session.engine.Quote(v.String())+" = ?")
+			args = append(args, bValue.MapIndex(v).Interface())
+		}
+	} else {
+		return 0, ErrParamsType
+	}
+
+	table := session.statement.RefTable
+
+	if session.statement.UseAutoTime && table != nil && table.Updated != "" {
+		if !session.statement.ColumnMap.Contain(table.Updated) &&
+			!session.statement.OmitColumnMap.Contain(table.Updated) {
+			colNames = append(colNames, session.engine.Quote(table.Updated)+" = ?")
+			col := table.UpdatedColumn()
+			val, t := session.engine.nowTime(col)
+			if session.engine.dialect.URI().DBType == schemas.ORACLE {
+				args = append(args, t)
+			} else {
+				args = append(args, val)
+			}
+
+			var colName = col.Name
+			if isStruct {
+				session.afterClosures = append(session.afterClosures, func(bean interface{}) {
+					col := table.GetColumn(colName)
+					setColumnTime(bean, col, t)
+				})
+			}
+		}
+	}
+
+	// for update actions like "column = column + ?"
+	incColumns := session.statement.IncrColumns
+	for i, colName := range incColumns.ColNames {
+		colNames = append(colNames, session.engine.Quote(colName)+" = "+session.engine.Quote(colName)+" + ?")
+		args = append(args, incColumns.Args[i])
+	}
+	// for update actions like "column = column - ?"
+	decColumns := session.statement.DecrColumns
+	for i, colName := range decColumns.ColNames {
+		colNames = append(colNames, session.engine.Quote(colName)+" = "+session.engine.Quote(colName)+" - ?")
+		args = append(args, decColumns.Args[i])
+	}
+	// for update actions like "column = expression"
+	exprColumns := session.statement.ExprColumns
+	for i, colName := range exprColumns.ColNames {
+		switch tp := exprColumns.Args[i].(type) {
+		case string:
+			if len(tp) == 0 {
+				tp = "''"
+			}
+			colNames = append(colNames, session.engine.Quote(colName)+"="+tp)
+		case *builder.Builder:
+			subQuery, subArgs, err := session.statement.GenCondSQL(tp)
+			if err != nil {
+				return 0, err
+			}
+			colNames = append(colNames, session.engine.Quote(colName)+"=("+subQuery+")")
+			args = append(args, subArgs...)
+ default: + colNames = append(colNames, session.engine.Quote(colName)+"=?") + args = append(args, exprColumns.Args[i]) + } + } + + if err = session.statement.ProcessIDParam(); err != nil { + return 0, err + } + + var autoCond builder.Cond + if !session.statement.NoAutoCondition { + condBeanIsStruct := false + if len(condiBean) > 0 { + if c, ok := condiBean[0].(map[string]interface{}); ok { + autoCond = builder.Eq(c) + } else { + ct := reflect.TypeOf(condiBean[0]) + k := ct.Kind() + if k == reflect.Ptr { + k = ct.Elem().Kind() + } + if k == reflect.Struct { + var err error + autoCond, err = session.statement.BuildConds(session.statement.RefTable, condiBean[0], true, true, false, true, false) + if err != nil { + return 0, err + } + condBeanIsStruct = true + } else { + return 0, ErrConditionType + } + } + } + + if !condBeanIsStruct && table != nil { + if col := table.DeletedColumn(); col != nil && !session.statement.GetUnscoped() { // tag "deleted" is enabled + autoCond1 := session.statement.CondDeleted(col) + + if autoCond == nil { + autoCond = autoCond1 + } else { + autoCond = autoCond.And(autoCond1) + } + } + } + } + + st := session.statement + + var ( + sqlStr string + condArgs []interface{} + condSQL string + cond = session.statement.Conds().And(autoCond) + + doIncVer = isStruct && (table != nil && table.Version != "" && session.statement.CheckVersion) + verValue *reflect.Value + ) + if doIncVer { + verValue, err = table.VersionColumn().ValueOf(bean) + if err != nil { + return 0, err + } + + if verValue != nil { + cond = cond.And(builder.Eq{session.engine.Quote(table.Version): verValue.Interface()}) + colNames = append(colNames, session.engine.Quote(table.Version)+" = "+session.engine.Quote(table.Version)+" + 1") + } + } + + if len(colNames) <= 0 { + return 0, errors.New("No content found to be updated") + } + + condSQL, condArgs, err = session.statement.GenCondSQL(cond) + if err != nil { + return 0, err + } + + if len(condSQL) > 0 { + condSQL = "WHERE " + condSQL + } + + if st.OrderStr != "" { + condSQL = condSQL + fmt.Sprintf(" ORDER BY %v", st.OrderStr) + } + + var tableName = session.statement.TableName() + // TODO: Oracle support needed + var top string + if st.LimitN != nil { + limitValue := *st.LimitN + switch session.engine.dialect.URI().DBType { + case schemas.MYSQL: + condSQL = condSQL + fmt.Sprintf(" LIMIT %d", limitValue) + case schemas.SQLITE: + tempCondSQL := condSQL + fmt.Sprintf(" LIMIT %d", limitValue) + cond = cond.And(builder.Expr(fmt.Sprintf("rowid IN (SELECT rowid FROM %v %v)", + session.engine.Quote(tableName), tempCondSQL), condArgs...)) + condSQL, condArgs, err = session.statement.GenCondSQL(cond) + if err != nil { + return 0, err + } + if len(condSQL) > 0 { + condSQL = "WHERE " + condSQL + } + case schemas.POSTGRES: + tempCondSQL := condSQL + fmt.Sprintf(" LIMIT %d", limitValue) + cond = cond.And(builder.Expr(fmt.Sprintf("CTID IN (SELECT CTID FROM %v %v)", + session.engine.Quote(tableName), tempCondSQL), condArgs...)) + condSQL, condArgs, err = session.statement.GenCondSQL(cond) + if err != nil { + return 0, err + } + + if len(condSQL) > 0 { + condSQL = "WHERE " + condSQL + } + case schemas.MSSQL: + if st.OrderStr != "" && table != nil && len(table.PrimaryKeys) == 1 { + cond = builder.Expr(fmt.Sprintf("%s IN (SELECT TOP (%d) %s FROM %v%v)", + table.PrimaryKeys[0], limitValue, table.PrimaryKeys[0], + session.engine.Quote(tableName), condSQL), condArgs...) 
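+				// cond now carries the TOP (n) subquery as the only row filter,
+				// so the WHERE clause is rebuilt from it before the UPDATE below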
+ + condSQL, condArgs, err = session.statement.GenCondSQL(cond) + if err != nil { + return 0, err + } + if len(condSQL) > 0 { + condSQL = "WHERE " + condSQL + } + } else { + top = fmt.Sprintf("TOP (%d) ", limitValue) + } + } + } + + var tableAlias = session.engine.Quote(tableName) + var fromSQL string + if session.statement.TableAlias != "" { + switch session.engine.dialect.URI().DBType { + case schemas.MSSQL: + fromSQL = fmt.Sprintf("FROM %s %s ", tableAlias, session.statement.TableAlias) + tableAlias = session.statement.TableAlias + default: + tableAlias = fmt.Sprintf("%s AS %s", tableAlias, session.statement.TableAlias) + } + } + + sqlStr = fmt.Sprintf("UPDATE %v%v SET %v %v%v", + top, + tableAlias, + strings.Join(colNames, ", "), + fromSQL, + condSQL) + + res, err := session.exec(sqlStr, append(args, condArgs...)...) + if err != nil { + return 0, err + } else if doIncVer { + if verValue != nil && verValue.IsValid() && verValue.CanSet() { + session.incrVersionFieldValue(verValue) + } + } + + if cacher := session.engine.GetCacher(tableName); cacher != nil && session.statement.UseCache { + // session.cacheUpdate(table, tableName, sqlStr, args...) + session.engine.logger.Debugf("[cache] clear table: %v", tableName) + cacher.ClearIds(tableName) + cacher.ClearBeans(tableName) + } + + // handle after update processors + if session.isAutoCommit { + for _, closure := range session.afterClosures { + closure(bean) + } + if processor, ok := interface{}(bean).(AfterUpdateProcessor); ok { + session.engine.logger.Debugf("[event] %v has after update processor", tableName) + processor.AfterUpdate() + } + } else { + lenAfterClosures := len(session.afterClosures) + if lenAfterClosures > 0 { + if value, has := session.afterUpdateBeans[bean]; has && value != nil { + *value = append(*value, session.afterClosures...) + } else { + afterClosures := make([]func(interface{}), lenAfterClosures) + copy(afterClosures, session.afterClosures) + // FIXME: if bean is a map type, it will panic because map cannot be as map key + session.afterUpdateBeans[bean] = &afterClosures + } + + } else { + if _, ok := interface{}(bean).(AfterUpdateProcessor); ok { + session.afterUpdateBeans[bean] = nil + } + } + } + cleanupProcessorsClosures(&session.afterClosures) // cleanup after used + // -- + + return res.RowsAffected() +} + +func (session *Session) genUpdateColumns(bean interface{}) ([]string, []interface{}, error) { + table := session.statement.RefTable + colNames := make([]string, 0, len(table.ColumnsSeq())) + args := make([]interface{}, 0, len(table.ColumnsSeq())) + + for _, col := range table.Columns() { + if !col.IsVersion && !col.IsCreated && !col.IsUpdated { + if session.statement.OmitColumnMap.Contain(col.Name) { + continue + } + } + if col.MapType == schemas.ONLYFROMDB { + continue + } + + fieldValuePtr, err := col.ValueOf(bean) + if err != nil { + return nil, nil, err + } + fieldValue := *fieldValuePtr + + if col.IsAutoIncrement && utils.IsValueZero(fieldValue) { + continue + } + + if (col.IsDeleted && !session.statement.GetUnscoped()) || col.IsCreated { + continue + } + + // if only update specify columns + if len(session.statement.ColumnMap) > 0 && !session.statement.ColumnMap.Contain(col.Name) { + continue + } + + if session.statement.IncrColumns.IsColExist(col.Name) { + continue + } else if session.statement.DecrColumns.IsColExist(col.Name) { + continue + } else if session.statement.ExprColumns.IsColExist(col.Name) { + continue + } + + // !evalphobia! 
set fieldValue as nil when column is nullable and zero-value + if _, ok := getFlagForColumn(session.statement.NullableMap, col); ok { + if col.Nullable && utils.IsValueZero(fieldValue) { + var nilValue *int + fieldValue = reflect.ValueOf(nilValue) + } + } + + if col.IsUpdated && session.statement.UseAutoTime /*&& isZero(fieldValue.Interface())*/ { + // if time is non-empty, then set to auto time + val, t := session.engine.nowTime(col) + args = append(args, val) + + var colName = col.Name + session.afterClosures = append(session.afterClosures, func(bean interface{}) { + col := table.GetColumn(colName) + setColumnTime(bean, col, t) + }) + } else if col.IsVersion && session.statement.CheckVersion { + args = append(args, 1) + } else { + arg, err := session.statement.Value2Interface(col, fieldValue) + if err != nil { + return colNames, args, err + } + args = append(args, arg) + } + + colNames = append(colNames, session.engine.Quote(col.Name)+" = ?") + } + return colNames, args, nil +} diff --git a/vendor/xorm.io/xorm/tags/parser.go b/vendor/xorm.io/xorm/tags/parser.go new file mode 100644 index 0000000..add30a1 --- /dev/null +++ b/vendor/xorm.io/xorm/tags/parser.go @@ -0,0 +1,308 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tags + +import ( + "encoding/gob" + "errors" + "fmt" + "reflect" + "strings" + "sync" + "time" + + "xorm.io/xorm/caches" + "xorm.io/xorm/convert" + "xorm.io/xorm/dialects" + "xorm.io/xorm/names" + "xorm.io/xorm/schemas" +) + +var ( + ErrUnsupportedType = errors.New("Unsupported type") +) + +type Parser struct { + identifier string + dialect dialects.Dialect + columnMapper names.Mapper + tableMapper names.Mapper + handlers map[string]Handler + cacherMgr *caches.Manager + tableCache sync.Map // map[reflect.Type]*schemas.Table +} + +func NewParser(identifier string, dialect dialects.Dialect, tableMapper, columnMapper names.Mapper, cacherMgr *caches.Manager) *Parser { + return &Parser{ + identifier: identifier, + dialect: dialect, + tableMapper: tableMapper, + columnMapper: columnMapper, + handlers: defaultTagHandlers, + cacherMgr: cacherMgr, + } +} + +func (parser *Parser) GetTableMapper() names.Mapper { + return parser.tableMapper +} + +func (parser *Parser) SetTableMapper(mapper names.Mapper) { + parser.ClearCaches() + parser.tableMapper = mapper +} + +func (parser *Parser) GetColumnMapper() names.Mapper { + return parser.columnMapper +} + +func (parser *Parser) SetColumnMapper(mapper names.Mapper) { + parser.ClearCaches() + parser.columnMapper = mapper +} + +func (parser *Parser) ParseWithCache(v reflect.Value) (*schemas.Table, error) { + t := v.Type() + tableI, ok := parser.tableCache.Load(t) + if ok { + return tableI.(*schemas.Table), nil + } + + table, err := parser.Parse(v) + if err != nil { + return nil, err + } + + parser.tableCache.Store(t, table) + + if parser.cacherMgr.GetDefaultCacher() != nil { + if v.CanAddr() { + gob.Register(v.Addr().Interface()) + } else { + gob.Register(v.Interface()) + } + } + + return table, nil +} + +// ClearCacheTable removes the database mapper of a type from the cache +func (parser *Parser) ClearCacheTable(t reflect.Type) { + parser.tableCache.Delete(t) +} + +// ClearCaches removes all the cached table information parsed by structs +func (parser *Parser) ClearCaches() { + parser.tableCache = sync.Map{} +} + +func addIndex(indexName string, table *schemas.Table, col *schemas.Column, indexType int) { + if 
index, ok := table.Indexes[indexName]; ok { + index.AddColumn(col.Name) + col.Indexes[index.Name] = indexType + } else { + index := schemas.NewIndex(indexName, indexType) + index.AddColumn(col.Name) + table.AddIndex(index) + col.Indexes[index.Name] = indexType + } +} + +// Parse parses a struct as a table information +func (parser *Parser) Parse(v reflect.Value) (*schemas.Table, error) { + t := v.Type() + if t.Kind() == reflect.Ptr { + t = t.Elem() + v = v.Elem() + } + if t.Kind() != reflect.Struct { + return nil, ErrUnsupportedType + } + + table := schemas.NewEmptyTable() + table.Type = t + table.Name = names.GetTableName(parser.tableMapper, v) + + var idFieldColName string + var hasCacheTag, hasNoCacheTag bool + + for i := 0; i < t.NumField(); i++ { + tag := t.Field(i).Tag + + ormTagStr := tag.Get(parser.identifier) + var col *schemas.Column + fieldValue := v.Field(i) + fieldType := fieldValue.Type() + + if ormTagStr != "" { + col = &schemas.Column{ + FieldName: t.Field(i).Name, + Nullable: true, + IsPrimaryKey: false, + IsAutoIncrement: false, + MapType: schemas.TWOSIDES, + Indexes: make(map[string]int), + DefaultIsEmpty: true, + } + tags := splitTag(ormTagStr) + + if len(tags) > 0 { + if tags[0] == "-" { + continue + } + + var ctx = Context{ + table: table, + col: col, + fieldValue: fieldValue, + indexNames: make(map[string]int), + parser: parser, + } + + if strings.HasPrefix(strings.ToUpper(tags[0]), "EXTENDS") { + pStart := strings.Index(tags[0], "(") + if pStart > -1 && strings.HasSuffix(tags[0], ")") { + var tagPrefix = strings.TrimFunc(tags[0][pStart+1:len(tags[0])-1], func(r rune) bool { + return r == '\'' || r == '"' + }) + + ctx.params = []string{tagPrefix} + } + + if err := ExtendsTagHandler(&ctx); err != nil { + return nil, err + } + continue + } + + for j, key := range tags { + if ctx.ignoreNext { + ctx.ignoreNext = false + continue + } + + k := strings.ToUpper(key) + ctx.tagName = k + ctx.params = []string{} + + pStart := strings.Index(k, "(") + if pStart == 0 { + return nil, errors.New("( could not be the first character") + } + if pStart > -1 { + if !strings.HasSuffix(k, ")") { + return nil, fmt.Errorf("field %s tag %s cannot match ) character", col.FieldName, key) + } + + ctx.tagName = k[:pStart] + ctx.params = strings.Split(key[pStart+1:len(k)-1], ",") + } + + if j > 0 { + ctx.preTag = strings.ToUpper(tags[j-1]) + } + if j < len(tags)-1 { + ctx.nextTag = tags[j+1] + } else { + ctx.nextTag = "" + } + + if h, ok := parser.handlers[ctx.tagName]; ok { + if err := h(&ctx); err != nil { + return nil, err + } + } else { + if strings.HasPrefix(key, "'") && strings.HasSuffix(key, "'") { + col.Name = key[1 : len(key)-1] + } else { + col.Name = key + } + } + + if ctx.hasCacheTag { + hasCacheTag = true + } + if ctx.hasNoCacheTag { + hasNoCacheTag = true + } + } + + if col.SQLType.Name == "" { + col.SQLType = schemas.Type2SQLType(fieldType) + } + parser.dialect.SQLType(col) + if col.Length == 0 { + col.Length = col.SQLType.DefaultLength + } + if col.Length2 == 0 { + col.Length2 = col.SQLType.DefaultLength2 + } + if col.Name == "" { + col.Name = parser.columnMapper.Obj2Table(t.Field(i).Name) + } + + if ctx.isUnique { + ctx.indexNames[col.Name] = schemas.UniqueType + } else if ctx.isIndex { + ctx.indexNames[col.Name] = schemas.IndexType + } + + for indexName, indexType := range ctx.indexNames { + addIndex(indexName, table, col, indexType) + } + } + } else { + var sqlType schemas.SQLType + if fieldValue.CanAddr() { + if _, ok := fieldValue.Addr().Interface().(convert.Conversion); ok { 
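+					// the pointer receiver implements convert.Conversion, so the
+					// value is persisted via FromDB/ToDB and mapped to a TEXT column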
+ sqlType = schemas.SQLType{Name: schemas.Text} + } + } + if _, ok := fieldValue.Interface().(convert.Conversion); ok { + sqlType = schemas.SQLType{Name: schemas.Text} + } else { + sqlType = schemas.Type2SQLType(fieldType) + } + col = schemas.NewColumn(parser.columnMapper.Obj2Table(t.Field(i).Name), + t.Field(i).Name, sqlType, sqlType.DefaultLength, + sqlType.DefaultLength2, true) + + if fieldType.Kind() == reflect.Int64 && (strings.ToUpper(col.FieldName) == "ID" || strings.HasSuffix(strings.ToUpper(col.FieldName), ".ID")) { + idFieldColName = col.Name + } + } + if col.IsAutoIncrement { + col.Nullable = false + } + + table.AddColumn(col) + + } // end for + + if idFieldColName != "" && len(table.PrimaryKeys) == 0 { + col := table.GetColumn(idFieldColName) + col.IsPrimaryKey = true + col.IsAutoIncrement = true + col.Nullable = false + table.PrimaryKeys = append(table.PrimaryKeys, col.Name) + table.AutoIncrement = col.Name + } + + if hasCacheTag { + if parser.cacherMgr.GetDefaultCacher() != nil { // !nash! use engine's cacher if provided + //engine.logger.Info("enable cache on table:", table.Name) + parser.cacherMgr.SetCacher(table.Name, parser.cacherMgr.GetDefaultCacher()) + } else { + //engine.logger.Info("enable LRU cache on table:", table.Name) + parser.cacherMgr.SetCacher(table.Name, caches.NewLRUCacher2(caches.NewMemoryStore(), time.Hour, 10000)) + } + } + if hasNoCacheTag { + //engine.logger.Info("disable cache on table:", table.Name) + parser.cacherMgr.SetCacher(table.Name, nil) + } + + return table, nil +} diff --git a/vendor/xorm.io/xorm/tags/tag.go b/vendor/xorm.io/xorm/tags/tag.go new file mode 100644 index 0000000..ee3f1e8 --- /dev/null +++ b/vendor/xorm.io/xorm/tags/tag.go @@ -0,0 +1,332 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tags + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "time" + + "xorm.io/xorm/schemas" +) + +func splitTag(tag string) (tags []string) { + tag = strings.TrimSpace(tag) + var hasQuote = false + var lastIdx = 0 + for i, t := range tag { + if t == '\'' { + hasQuote = !hasQuote + } else if t == ' ' { + if lastIdx < i && !hasQuote { + tags = append(tags, strings.TrimSpace(tag[lastIdx:i])) + lastIdx = i + 1 + } + } + } + if lastIdx < len(tag) { + tags = append(tags, strings.TrimSpace(tag[lastIdx:])) + } + return +} + +// Context represents a context for xorm tag parse. 
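+// Parse builds one Context per struct field and re-points it at each tag in
+// turn; tag handlers communicate through its flags (isIndex, ignoreNext, ...).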
+type Context struct { + tagName string + params []string + preTag, nextTag string + table *schemas.Table + col *schemas.Column + fieldValue reflect.Value + isIndex bool + isUnique bool + indexNames map[string]int + parser *Parser + hasCacheTag bool + hasNoCacheTag bool + ignoreNext bool +} + +// Handler describes tag handler for XORM +type Handler func(ctx *Context) error + +var ( + // defaultTagHandlers enumerates all the default tag handler + defaultTagHandlers = map[string]Handler{ + "<-": OnlyFromDBTagHandler, + "->": OnlyToDBTagHandler, + "PK": PKTagHandler, + "NULL": NULLTagHandler, + "NOT": IgnoreTagHandler, + "AUTOINCR": AutoIncrTagHandler, + "DEFAULT": DefaultTagHandler, + "CREATED": CreatedTagHandler, + "UPDATED": UpdatedTagHandler, + "DELETED": DeletedTagHandler, + "VERSION": VersionTagHandler, + "UTC": UTCTagHandler, + "LOCAL": LocalTagHandler, + "NOTNULL": NotNullTagHandler, + "INDEX": IndexTagHandler, + "UNIQUE": UniqueTagHandler, + "CACHE": CacheTagHandler, + "NOCACHE": NoCacheTagHandler, + "COMMENT": CommentTagHandler, + } +) + +func init() { + for k := range schemas.SqlTypes { + defaultTagHandlers[k] = SQLTypeTagHandler + } +} + +// IgnoreTagHandler describes ignored tag handler +func IgnoreTagHandler(ctx *Context) error { + return nil +} + +// OnlyFromDBTagHandler describes mapping direction tag handler +func OnlyFromDBTagHandler(ctx *Context) error { + ctx.col.MapType = schemas.ONLYFROMDB + return nil +} + +// OnlyToDBTagHandler describes mapping direction tag handler +func OnlyToDBTagHandler(ctx *Context) error { + ctx.col.MapType = schemas.ONLYTODB + return nil +} + +// PKTagHandler describes primary key tag handler +func PKTagHandler(ctx *Context) error { + ctx.col.IsPrimaryKey = true + ctx.col.Nullable = false + return nil +} + +// NULLTagHandler describes null tag handler +func NULLTagHandler(ctx *Context) error { + ctx.col.Nullable = (strings.ToUpper(ctx.preTag) != "NOT") + return nil +} + +// NotNullTagHandler describes notnull tag handler +func NotNullTagHandler(ctx *Context) error { + ctx.col.Nullable = false + return nil +} + +// AutoIncrTagHandler describes autoincr tag handler +func AutoIncrTagHandler(ctx *Context) error { + ctx.col.IsAutoIncrement = true + /* + if len(ctx.params) > 0 { + autoStartInt, err := strconv.Atoi(ctx.params[0]) + if err != nil { + return err + } + ctx.col.AutoIncrStart = autoStartInt + } else { + ctx.col.AutoIncrStart = 1 + } + */ + return nil +} + +// DefaultTagHandler describes default tag handler +func DefaultTagHandler(ctx *Context) error { + if len(ctx.params) > 0 { + ctx.col.Default = ctx.params[0] + } else { + ctx.col.Default = ctx.nextTag + ctx.ignoreNext = true + } + ctx.col.DefaultIsEmpty = false + return nil +} + +// CreatedTagHandler describes created tag handler +func CreatedTagHandler(ctx *Context) error { + ctx.col.IsCreated = true + return nil +} + +// VersionTagHandler describes version tag handler +func VersionTagHandler(ctx *Context) error { + ctx.col.IsVersion = true + ctx.col.Default = "1" + return nil +} + +// UTCTagHandler describes utc tag handler +func UTCTagHandler(ctx *Context) error { + ctx.col.TimeZone = time.UTC + return nil +} + +// LocalTagHandler describes local tag handler +func LocalTagHandler(ctx *Context) error { + if len(ctx.params) == 0 { + ctx.col.TimeZone = time.Local + } else { + var err error + ctx.col.TimeZone, err = time.LoadLocation(ctx.params[0]) + if err != nil { + return err + } + } + return nil +} + +// UpdatedTagHandler describes updated tag handler +func UpdatedTagHandler(ctx 
*Context) error { + ctx.col.IsUpdated = true + return nil +} + +// DeletedTagHandler describes deleted tag handler +func DeletedTagHandler(ctx *Context) error { + ctx.col.IsDeleted = true + return nil +} + +// IndexTagHandler describes index tag handler +func IndexTagHandler(ctx *Context) error { + if len(ctx.params) > 0 { + ctx.indexNames[ctx.params[0]] = schemas.IndexType + } else { + ctx.isIndex = true + } + return nil +} + +// UniqueTagHandler describes unique tag handler +func UniqueTagHandler(ctx *Context) error { + if len(ctx.params) > 0 { + ctx.indexNames[ctx.params[0]] = schemas.UniqueType + } else { + ctx.isUnique = true + } + return nil +} + +// CommentTagHandler add comment to column +func CommentTagHandler(ctx *Context) error { + if len(ctx.params) > 0 { + ctx.col.Comment = strings.Trim(ctx.params[0], "' ") + } + return nil +} + +// SQLTypeTagHandler describes SQL Type tag handler +func SQLTypeTagHandler(ctx *Context) error { + ctx.col.SQLType = schemas.SQLType{Name: ctx.tagName} + if len(ctx.params) > 0 { + if ctx.tagName == schemas.Enum { + ctx.col.EnumOptions = make(map[string]int) + for k, v := range ctx.params { + v = strings.TrimSpace(v) + v = strings.Trim(v, "'") + ctx.col.EnumOptions[v] = k + } + } else if ctx.tagName == schemas.Set { + ctx.col.SetOptions = make(map[string]int) + for k, v := range ctx.params { + v = strings.TrimSpace(v) + v = strings.Trim(v, "'") + ctx.col.SetOptions[v] = k + } + } else { + var err error + if len(ctx.params) == 2 { + ctx.col.Length, err = strconv.Atoi(ctx.params[0]) + if err != nil { + return err + } + ctx.col.Length2, err = strconv.Atoi(ctx.params[1]) + if err != nil { + return err + } + } else if len(ctx.params) == 1 { + ctx.col.Length, err = strconv.Atoi(ctx.params[0]) + if err != nil { + return err + } + } + } + } + return nil +} + +// ExtendsTagHandler describes extends tag handler +func ExtendsTagHandler(ctx *Context) error { + var fieldValue = ctx.fieldValue + var isPtr = false + switch fieldValue.Kind() { + case reflect.Ptr: + f := fieldValue.Type().Elem() + if f.Kind() == reflect.Struct { + fieldPtr := fieldValue + fieldValue = fieldValue.Elem() + if !fieldValue.IsValid() || fieldPtr.IsNil() { + fieldValue = reflect.New(f).Elem() + } + } + isPtr = true + fallthrough + case reflect.Struct: + parentTable, err := ctx.parser.Parse(fieldValue) + if err != nil { + return err + } + for _, col := range parentTable.Columns() { + col.FieldName = fmt.Sprintf("%v.%v", ctx.col.FieldName, col.FieldName) + + var tagPrefix = ctx.col.FieldName + if len(ctx.params) > 0 { + col.Nullable = isPtr + tagPrefix = ctx.params[0] + if col.IsPrimaryKey { + col.Name = ctx.col.FieldName + col.IsPrimaryKey = false + } else { + col.Name = fmt.Sprintf("%v%v", tagPrefix, col.Name) + } + } + + if col.Nullable { + col.IsAutoIncrement = false + col.IsPrimaryKey = false + } + + ctx.table.AddColumn(col) + for indexName, indexType := range col.Indexes { + addIndex(indexName, ctx.table, col, indexType) + } + } + default: + //TODO: warning + } + return nil +} + +// CacheTagHandler describes cache tag handler +func CacheTagHandler(ctx *Context) error { + if !ctx.hasCacheTag { + ctx.hasCacheTag = true + } + return nil +} + +// NoCacheTagHandler describes nocache tag handler +func NoCacheTagHandler(ctx *Context) error { + if !ctx.hasNoCacheTag { + ctx.hasNoCacheTag = true + } + return nil +}
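
The tag grammar implemented by parser.go and tag.go above, and the Update
paths in session_update.go, are easiest to see from a mapped struct. The
following is a minimal sketch, not part of the patch: the Account and Profile
types, the MySQL DSN, and the driver import are hypothetical stand-ins, but
every tag shown is dispatched through the defaultTagHandlers table above, and
the calls at the end exercise the Update, IncrColumns, and ExprColumns code
paths.

package main

import (
	"fmt"
	"time"

	_ "github.com/go-sql-driver/mysql"
	"xorm.io/xorm"
)

// Account exercises most of the default tag handlers registered above.
type Account struct {
	Id      int64     `xorm:"pk autoincr"`                        // PKTagHandler + AutoIncrTagHandler
	Name    string    `xorm:"varchar(64) notnull 'account_name'"` // SQLType, NOTNULL, quoted name override
	Email   string    `xorm:"unique index(idx_email)"`            // UniqueTagHandler + named IndexTagHandler
	Logins  int64     `xorm:"default 0"`                          // DefaultTagHandler consumes the next tag
	Ver     int       `xorm:"version"`                            // optimistic-locking column, default "1"
	Created time.Time `xorm:"created"`
	Updated time.Time `xorm:"updated"`
	Deleted time.Time `xorm:"deleted"` // soft delete; filtered via CondDeleted in Update
}

// Profile pulls Account's columns in through ExtendsTagHandler.
type Profile struct {
	Account `xorm:"extends"`
	Bio     string `xorm:"text"`
}

func main() {
	engine, err := xorm.NewEngine("mysql", "user:pass@/db?charset=utf8")
	if err != nil {
		panic(err)
	}
	// Sync2 runs the tag Parser on both types and creates the mapped tables.
	if err := engine.Sync2(new(Account), new(Profile)); err != nil {
		panic(err)
	}

	// Non-zero fields of the first bean become SET clauses; non-zero fields
	// of the optional second bean become WHERE conditions (see Update above).
	affected, err := engine.Update(
		&Account{Name: "alice"},
		&Account{Email: "alice@example.com"},
	)
	if err != nil {
		panic(err)
	}

	// Incr and SetExpr feed the IncrColumns / ExprColumns branches in Update.
	affected, err = engine.ID(1).
		Incr("logins").
		SetExpr("account_name", "UPPER(account_name)").
		Update(&Account{})
	if err != nil {
		panic(err)
	}
	fmt.Println("rows affected:", affected)
}

Note that splitTag treats spaces inside single quotes as part of one tag, so
'account_name' survives as a single name override, and the extends tag hoists
Account's columns into Profile with their indexes re-registered via addIndex.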