From 7eb9bf0efa07de2cb02a728e8e06aeb33614f722 Mon Sep 17 00:00:00 2001
From: henrylee2cn
Date: Fri, 9 Aug 2019 00:25:22 +0800
Subject: [PATCH] style: Add go.mod and update vendor
Change-Id: Ib0e6dcc73d4ce97b8a0413c65af5f1cc4d817e4b
---
ext/db/directsql/sqlengine.go | 2 +-
ext/db/directsql/sqlhelper.go | 2 +-
ext/db/directsql/sqlmanage.go | 2 +-
ext/db/directsql/sqlservice.go | 2 +-
ext/db/xorm/logger.go | 2 +-
ext/db/xorm/service.go | 2 +-
go.mod | 43 +
go.sum | 266 +
vendor/github.com/bradfitz/gomemcache/LICENSE | 202 +
.../bradfitz/gomemcache/memcache/memcache.go | 687 +
.../bradfitz/gomemcache/memcache/selector.go | 129 +
.../couchbase/go-couchbase/.gitignore | 14 +
.../couchbase/go-couchbase/.travis.yml | 5 +
.../github.com/couchbase/go-couchbase/LICENSE | 19 +
.../couchbase/go-couchbase/README.markdown | 37 +
.../couchbase/go-couchbase/audit.go | 32 +
.../couchbase/go-couchbase/client.go | 1477 +
.../couchbase/go-couchbase/conn_pool.go | 415 +
.../couchbase/go-couchbase/ddocs.go | 288 +
.../couchbase/go-couchbase/observe.go | 300 +
.../couchbase/go-couchbase/pools.go | 1474 +
.../couchbase/go-couchbase/port_map.go | 84 +
.../couchbase/go-couchbase/streaming.go | 215 +
.../github.com/couchbase/go-couchbase/tap.go | 143 +
.../github.com/couchbase/go-couchbase/upr.go | 398 +
.../couchbase/go-couchbase/users.go | 119 +
.../github.com/couchbase/go-couchbase/util.go | 49 +
.../couchbase/go-couchbase/vbmap.go | 77 +
.../couchbase/go-couchbase/views.go | 231 +
.../couchbase/gomemcached/.gitignore | 6 +
.../github.com/couchbase/gomemcached/LICENSE | 19 +
.../couchbase/gomemcached/README.markdown | 32 +
.../couchbase/gomemcached/client/mc.go | 1140 +
.../couchbase/gomemcached/client/tap_feed.go | 333 +
.../couchbase/gomemcached/client/transport.go | 67 +
.../couchbase/gomemcached/client/upr_feed.go | 1107 +
.../couchbase/gomemcached/mc_constants.go | 345 +
.../couchbase/gomemcached/mc_req.go | 197 +
.../couchbase/gomemcached/mc_res.go | 267 +
.../github.com/couchbase/gomemcached/tap.go | 168 +
.../github.com/couchbase/goutils/LICENSE.md | 47 +
.../couchbase/goutils/logging/logger.go | 482 +
.../couchbase/goutils/logging/logger_golog.go | 365 +
.../couchbase/goutils/scramsha/scramsha.go | 207 +
.../goutils/scramsha/scramsha_http.go | 252 +
vendor/github.com/cupcake/rdb/.gitignore | 25 +
vendor/github.com/cupcake/rdb/.travis.yml | 6 +
vendor/github.com/cupcake/rdb/LICENCE | 21 +
vendor/github.com/cupcake/rdb/README.md | 17 +
vendor/github.com/cupcake/rdb/crc64/crc64.go | 64 +
vendor/github.com/cupcake/rdb/decoder.go | 824 +
vendor/github.com/cupcake/rdb/encoder.go | 130 +
.../cupcake/rdb/nopdecoder/nop_decoder.go | 24 +
vendor/github.com/cupcake/rdb/slice_buffer.go | 67 +
vendor/github.com/davecgh/go-spew/LICENSE | 15 +
.../github.com/davecgh/go-spew/spew/bypass.go | 145 +
.../davecgh/go-spew/spew/bypasssafe.go | 38 +
.../github.com/davecgh/go-spew/spew/common.go | 341 +
.../github.com/davecgh/go-spew/spew/config.go | 306 +
vendor/github.com/davecgh/go-spew/spew/doc.go | 211 +
.../github.com/davecgh/go-spew/spew/dump.go | 509 +
.../github.com/davecgh/go-spew/spew/format.go | 419 +
.../github.com/davecgh/go-spew/spew/spew.go | 148 +
vendor/github.com/edsrzf/mmap-go/.gitignore | 8 +
vendor/github.com/edsrzf/mmap-go/LICENSE | 25 +
vendor/github.com/edsrzf/mmap-go/README.md | 12 +
vendor/github.com/edsrzf/mmap-go/mmap.go | 117 +
vendor/github.com/edsrzf/mmap-go/mmap_unix.go | 51 +
.../github.com/edsrzf/mmap-go/mmap_windows.go | 143 +
.../elazarl/go-bindata-assetfs/LICENSE | 46 +-
.../elazarl/go-bindata-assetfs/README.md | 92 +-
.../elazarl/go-bindata-assetfs/assetfs.go | 13 +-
.../go-bindata-assetfs/main.go | 100 -
.../github.com/facebookgo/ensure/.travis.yml | 20 +
vendor/github.com/facebookgo/ensure/ensure.go | 353 +
vendor/github.com/facebookgo/ensure/license | 30 +
vendor/github.com/facebookgo/ensure/patents | 33 +
vendor/github.com/facebookgo/ensure/readme.md | 4 +
.../facebookgo/freeport/.travis.yml | 24 +
.../facebookgo/freeport/freeport.go | 24 +
vendor/github.com/facebookgo/freeport/license | 30 +
vendor/github.com/facebookgo/freeport/patents | 33 +
.../github.com/facebookgo/freeport/readme.md | 5 +
.../github.com/facebookgo/stack/.travis.yml | 23 +
vendor/github.com/facebookgo/stack/license | 30 +
vendor/github.com/facebookgo/stack/patents | 33 +
vendor/github.com/facebookgo/stack/readme.md | 142 +
vendor/github.com/facebookgo/stack/stack.go | 230 +
.../github.com/facebookgo/subset/.travis.yml | 24 +
vendor/github.com/facebookgo/subset/license | 30 +
vendor/github.com/facebookgo/subset/patents | 33 +
vendor/github.com/facebookgo/subset/readme.md | 5 +
vendor/github.com/facebookgo/subset/subset.go | 200 +
.../github.com/flosch/pongo2/.gitattributes | 1 +
vendor/github.com/flosch/pongo2/.gitignore | 42 +
vendor/github.com/flosch/pongo2/.travis.yml | 8 +
vendor/github.com/flosch/pongo2/AUTHORS | 11 +
vendor/github.com/flosch/pongo2/LICENSE | 20 +
vendor/github.com/flosch/pongo2/README.md | 273 +
vendor/github.com/flosch/pongo2/context.go | 136 +
vendor/github.com/flosch/pongo2/doc.go | 31 +
vendor/github.com/flosch/pongo2/error.go | 91 +
vendor/github.com/flosch/pongo2/filters.go | 143 +
.../flosch/pongo2/filters_builtin.go | 927 +
vendor/github.com/flosch/pongo2/go.mod | 13 +
vendor/github.com/flosch/pongo2/helpers.go | 15 +
vendor/github.com/flosch/pongo2/lexer.go | 432 +
vendor/github.com/flosch/pongo2/nodes.go | 16 +
vendor/github.com/flosch/pongo2/nodes_html.go | 23 +
.../github.com/flosch/pongo2/nodes_wrapper.go | 16 +
vendor/github.com/flosch/pongo2/options.go | 26 +
vendor/github.com/flosch/pongo2/parser.go | 309 +
.../flosch/pongo2/parser_document.go | 59 +
.../flosch/pongo2/parser_expression.go | 503 +
vendor/github.com/flosch/pongo2/pongo2.go | 14 +
vendor/github.com/flosch/pongo2/tags.go | 135 +
.../flosch/pongo2/tags_autoescape.go | 52 +
vendor/github.com/flosch/pongo2/tags_block.go | 129 +
.../github.com/flosch/pongo2/tags_comment.go | 27 +
vendor/github.com/flosch/pongo2/tags_cycle.go | 106 +
.../github.com/flosch/pongo2/tags_extends.go | 52 +
.../github.com/flosch/pongo2/tags_filter.go | 95 +
.../github.com/flosch/pongo2/tags_firstof.go | 49 +
vendor/github.com/flosch/pongo2/tags_for.go | 159 +
vendor/github.com/flosch/pongo2/tags_if.go | 76 +
.../flosch/pongo2/tags_ifchanged.go | 116 +
.../github.com/flosch/pongo2/tags_ifequal.go | 78 +
.../flosch/pongo2/tags_ifnotequal.go | 78 +
.../github.com/flosch/pongo2/tags_import.go | 84 +
.../github.com/flosch/pongo2/tags_include.go | 146 +
vendor/github.com/flosch/pongo2/tags_lorem.go | 133 +
vendor/github.com/flosch/pongo2/tags_macro.go | 149 +
vendor/github.com/flosch/pongo2/tags_now.go | 50 +
vendor/github.com/flosch/pongo2/tags_set.go | 50 +
.../flosch/pongo2/tags_spaceless.go | 54 +
vendor/github.com/flosch/pongo2/tags_ssi.go | 68 +
.../flosch/pongo2/tags_templatetag.go | 45 +
.../flosch/pongo2/tags_widthratio.go | 83 +
vendor/github.com/flosch/pongo2/tags_with.go | 88 +
vendor/github.com/flosch/pongo2/template.go | 277 +
.../flosch/pongo2/template_loader.go | 157 +
.../github.com/flosch/pongo2/template_sets.go | 305 +
vendor/github.com/flosch/pongo2/value.go | 520 +
vendor/github.com/flosch/pongo2/variable.go | 695 +
.../fsnotify/fsnotify/.editorconfig | 5 +
.../github.com/fsnotify/fsnotify/.gitignore | 6 +
.../github.com/fsnotify/fsnotify/.travis.yml | 30 +
vendor/github.com/fsnotify/fsnotify/AUTHORS | 52 +
.../github.com/fsnotify/fsnotify/CHANGELOG.md | 317 +
.../fsnotify/fsnotify/CONTRIBUTING.md | 77 +
vendor/github.com/fsnotify/fsnotify/LICENSE | 28 +
vendor/github.com/fsnotify/fsnotify/README.md | 79 +
vendor/github.com/fsnotify/fsnotify/fen.go | 37 +
.../github.com/fsnotify/fsnotify/fsnotify.go | 66 +
.../github.com/fsnotify/fsnotify/inotify.go | 337 +
.../fsnotify/fsnotify/inotify_poller.go | 187 +
vendor/github.com/fsnotify/fsnotify/kqueue.go | 521 +
.../fsnotify/fsnotify/open_mode_bsd.go | 11 +
.../fsnotify/fsnotify/open_mode_darwin.go | 12 +
.../github.com/fsnotify/fsnotify/windows.go | 561 +
vendor/github.com/garyburd/redigo/LICENSE | 175 +
.../garyburd/redigo/internal/commandinfo.go | 54 +
.../github.com/garyburd/redigo/redis/conn.go | 673 +
.../github.com/garyburd/redigo/redis/doc.go | 177 +
.../github.com/garyburd/redigo/redis/go16.go | 27 +
.../github.com/garyburd/redigo/redis/go17.go | 29 +
.../github.com/garyburd/redigo/redis/go18.go | 9 +
.../github.com/garyburd/redigo/redis/log.go | 134 +
.../github.com/garyburd/redigo/redis/pool.go | 527 +
.../garyburd/redigo/redis/pool17.go | 35 +
.../garyburd/redigo/redis/pubsub.go | 157 +
.../github.com/garyburd/redigo/redis/redis.go | 117 +
.../github.com/garyburd/redigo/redis/reply.go | 479 +
.../github.com/garyburd/redigo/redis/scan.go | 585 +
.../garyburd/redigo/redis/script.go | 91 +
.../github.com/go-sql-driver/mysql/.gitignore | 9 +
.../go-sql-driver/mysql/.travis.yml | 107 +
vendor/github.com/go-sql-driver/mysql/AUTHORS | 89 +
.../go-sql-driver/mysql/CHANGELOG.md | 178 +
.../go-sql-driver/mysql/CONTRIBUTING.md | 23 +
vendor/github.com/go-sql-driver/mysql/LICENSE | 373 +
.../github.com/go-sql-driver/mysql/README.md | 490 +
.../go-sql-driver/mysql/appengine.go | 19 +
vendor/github.com/go-sql-driver/mysql/auth.go | 420 +
.../github.com/go-sql-driver/mysql/buffer.go | 147 +
.../go-sql-driver/mysql/collations.go | 251 +
.../go-sql-driver/mysql/connection.go | 461 +
.../go-sql-driver/mysql/connection_go18.go | 207 +
.../github.com/go-sql-driver/mysql/const.go | 174 +
.../github.com/go-sql-driver/mysql/driver.go | 172 +
vendor/github.com/go-sql-driver/mysql/dsn.go | 611 +
.../github.com/go-sql-driver/mysql/errors.go | 65 +
.../github.com/go-sql-driver/mysql/fields.go | 194 +
.../github.com/go-sql-driver/mysql/infile.go | 182 +
.../github.com/go-sql-driver/mysql/packets.go | 1286 +
.../github.com/go-sql-driver/mysql/result.go | 22 +
vendor/github.com/go-sql-driver/mysql/rows.go | 216 +
.../go-sql-driver/mysql/statement.go | 211 +
.../go-sql-driver/mysql/transaction.go | 31 +
.../github.com/go-sql-driver/mysql/utils.go | 726 +
.../go-sql-driver/mysql/utils_go17.go | 40 +
.../go-sql-driver/mysql/utils_go18.go | 50 +
vendor/github.com/go-xorm/xorm/.drone.yml | 124 +
vendor/github.com/go-xorm/xorm/.gitignore | 33 +
.../github.com/go-xorm/xorm/CONTRIBUTING.md | 46 +
vendor/github.com/go-xorm/xorm/LICENSE | 27 +
vendor/github.com/go-xorm/xorm/README.md | 503 +
vendor/github.com/go-xorm/xorm/README_CN.md | 500 +
vendor/github.com/go-xorm/xorm/cache_lru.go | 284 +
.../go-xorm/xorm/cache_memory_store.go | 51 +
.../github.com/go-xorm/xorm/context_cache.go | 30 +
vendor/github.com/go-xorm/xorm/convert.go | 348 +
.../github.com/go-xorm/xorm/dialect_mssql.go | 568 +
.../github.com/go-xorm/xorm/dialect_mysql.go | 656 +
.../github.com/go-xorm/xorm/dialect_oracle.go | 902 +
.../go-xorm/xorm/dialect_postgres.go | 1247 +
.../go-xorm/xorm/dialect_sqlite3.go | 456 +
vendor/github.com/go-xorm/xorm/doc.go | 184 +
vendor/github.com/go-xorm/xorm/engine.go | 1645 +
vendor/github.com/go-xorm/xorm/engine_cond.go | 232 +
.../github.com/go-xorm/xorm/engine_context.go | 28 +
.../github.com/go-xorm/xorm/engine_group.go | 219 +
.../go-xorm/xorm/engine_group_policy.go | 116 +
.../github.com/go-xorm/xorm/engine_table.go | 113 +
vendor/github.com/go-xorm/xorm/error.go | 51 +
.../github.com/go-xorm/xorm/gen_reserved.sh | 6 +
vendor/github.com/go-xorm/xorm/go.mod | 19 +
vendor/github.com/go-xorm/xorm/go.sum | 168 +
vendor/github.com/go-xorm/xorm/helpers.go | 332 +
.../github.com/go-xorm/xorm/helpler_time.go | 21 +
vendor/github.com/go-xorm/xorm/interface.go | 118 +
vendor/github.com/go-xorm/xorm/json.go | 31 +
vendor/github.com/go-xorm/xorm/logger.go | 187 +
.../github.com/go-xorm/xorm/pg_reserved.txt | 746 +
vendor/github.com/go-xorm/xorm/processors.go | 78 +
vendor/github.com/go-xorm/xorm/rows.go | 121 +
vendor/github.com/go-xorm/xorm/session.go | 866 +
.../github.com/go-xorm/xorm/session_cols.go | 199 +
.../github.com/go-xorm/xorm/session_cond.go | 70 +
.../go-xorm/xorm/session_context.go | 23 +
.../go-xorm/xorm/session_convert.go | 661 +
.../github.com/go-xorm/xorm/session_delete.go | 244 +
.../github.com/go-xorm/xorm/session_exist.go | 96 +
.../github.com/go-xorm/xorm/session_find.go | 505 +
vendor/github.com/go-xorm/xorm/session_get.go | 356 +
.../github.com/go-xorm/xorm/session_insert.go | 739 +
.../go-xorm/xorm/session_iterate.go | 100 +
.../github.com/go-xorm/xorm/session_query.go | 320 +
vendor/github.com/go-xorm/xorm/session_raw.go | 227 +
.../github.com/go-xorm/xorm/session_schema.go | 421 +
.../github.com/go-xorm/xorm/session_stats.go | 98 +
vendor/github.com/go-xorm/xorm/session_tx.go | 83 +
.../github.com/go-xorm/xorm/session_update.go | 512 +
vendor/github.com/go-xorm/xorm/statement.go | 1262 +
vendor/github.com/go-xorm/xorm/syslogger.go | 89 +
vendor/github.com/go-xorm/xorm/tag.go | 310 +
vendor/github.com/go-xorm/xorm/test_mssql.sh | 1 +
.../go-xorm/xorm/test_mssql_cache.sh | 1 +
.../github.com/go-xorm/xorm/test_mymysql.sh | 1 +
.../go-xorm/xorm/test_mymysql_cache.sh | 1 +
vendor/github.com/go-xorm/xorm/test_mysql.sh | 1 +
.../go-xorm/xorm/test_mysql_cache.sh | 1 +
.../github.com/go-xorm/xorm/test_postgres.sh | 1 +
.../go-xorm/xorm/test_postgres_cache.sh | 1 +
vendor/github.com/go-xorm/xorm/test_sqlite.sh | 1 +
.../go-xorm/xorm/test_sqlite_cache.sh | 1 +
vendor/github.com/go-xorm/xorm/test_tidb.sh | 1 +
vendor/github.com/go-xorm/xorm/transaction.go | 26 +
vendor/github.com/go-xorm/xorm/types.go | 16 +
vendor/github.com/go-xorm/xorm/xorm.go | 126 +
vendor/github.com/golang/snappy/.gitignore | 16 +
vendor/github.com/golang/snappy/AUTHORS | 15 +
vendor/github.com/golang/snappy/CONTRIBUTORS | 37 +
vendor/github.com/golang/snappy/LICENSE | 27 +
vendor/github.com/golang/snappy/README | 107 +
vendor/github.com/golang/snappy/decode.go | 237 +
.../github.com/golang/snappy/decode_amd64.go | 14 +
.../github.com/golang/snappy/decode_amd64.s | 490 +
.../github.com/golang/snappy/decode_other.go | 101 +
vendor/github.com/golang/snappy/encode.go | 285 +
.../github.com/golang/snappy/encode_amd64.go | 29 +
.../github.com/golang/snappy/encode_amd64.s | 730 +
.../github.com/golang/snappy/encode_other.go | 238 +
vendor/github.com/golang/snappy/snappy.go | 98 +
.../github.com/gorilla/websocket/.gitignore | 25 +
.../github.com/gorilla/websocket/.travis.yml | 19 +
vendor/github.com/gorilla/websocket/AUTHORS | 9 +
vendor/github.com/gorilla/websocket/LICENSE | 22 +
vendor/github.com/gorilla/websocket/README.md | 64 +
vendor/github.com/gorilla/websocket/client.go | 395 +
.../gorilla/websocket/client_clone.go | 16 +
.../gorilla/websocket/client_clone_legacy.go | 38 +
.../gorilla/websocket/compression.go | 148 +
vendor/github.com/gorilla/websocket/conn.go | 1165 +
.../gorilla/websocket/conn_write.go | 15 +
.../gorilla/websocket/conn_write_legacy.go | 18 +
vendor/github.com/gorilla/websocket/doc.go | 180 +
vendor/github.com/gorilla/websocket/json.go | 60 +
vendor/github.com/gorilla/websocket/mask.go | 54 +
.../github.com/gorilla/websocket/mask_safe.go | 15 +
.../github.com/gorilla/websocket/prepared.go | 102 +
vendor/github.com/gorilla/websocket/proxy.go | 77 +
vendor/github.com/gorilla/websocket/server.go | 363 +
vendor/github.com/gorilla/websocket/trace.go | 19 +
.../github.com/gorilla/websocket/trace_17.go | 12 +
vendor/github.com/gorilla/websocket/util.go | 237 +
.../gorilla/websocket/x_net_proxy.go | 473 +
.../github.com/henrylee2cn/goutil/.gitignore | 39 +
.../github.com/henrylee2cn/goutil/README.md | 27 +
.../github.com/henrylee2cn/goutil/currip.go | 75 +
vendor/github.com/henrylee2cn/goutil/doc.go | 7 +
.../github.com/henrylee2cn/goutil/encrypt.go | 91 +
.../henrylee2cn/goutil/errors/errors.go | 96 +
.../github.com/henrylee2cn/goutil/exported.go | 34 +
vendor/github.com/henrylee2cn/goutil/file.go | 417 +
.../github.com/henrylee2cn/goutil/gopath.go | 47 +
.../github.com/henrylee2cn/goutil/gotest.go | 31 +
.../github.com/henrylee2cn/goutil/js_url.go | 19 +
vendor/github.com/henrylee2cn/goutil/map.go | 647 +
vendor/github.com/henrylee2cn/goutil/other.go | 67 +
.../github.com/henrylee2cn/goutil/pid_file.go | 31 +
.../github.com/henrylee2cn/goutil/random.go | 78 +
vendor/github.com/henrylee2cn/goutil/sets.go | 462 +
.../github.com/henrylee2cn/goutil/string.go | 383 +
vendor/github.com/henrylee2cn/goutil/targz.go | 111 +
vendor/github.com/henrylee2cn/goutil/trace.go | 44 +
vendor/github.com/henrylee2cn/ini/.gitignore | 7 +
vendor/github.com/henrylee2cn/ini/.travis.yml | 14 +
vendor/github.com/henrylee2cn/ini/LICENSE | 191 +
vendor/github.com/henrylee2cn/ini/Makefile | 12 +
vendor/github.com/henrylee2cn/ini/README.md | 749 +
.../github.com/henrylee2cn/ini/README_ZH.md | 736 +
vendor/github.com/henrylee2cn/ini/error.go | 32 +
vendor/github.com/henrylee2cn/ini/helper.go | 79 +
vendor/github.com/henrylee2cn/ini/ini.go | 565 +
vendor/github.com/henrylee2cn/ini/key.go | 699 +
vendor/github.com/henrylee2cn/ini/parser.go | 361 +
vendor/github.com/henrylee2cn/ini/section.go | 248 +
vendor/github.com/henrylee2cn/ini/struct.go | 516 +
vendor/github.com/jinzhu/gorm/.gitignore | 3 +
vendor/github.com/jinzhu/gorm/License | 21 +
vendor/github.com/jinzhu/gorm/README.md | 41 +
vendor/github.com/jinzhu/gorm/association.go | 377 +
vendor/github.com/jinzhu/gorm/callback.go | 245 +
.../github.com/jinzhu/gorm/callback_create.go | 173 +
.../github.com/jinzhu/gorm/callback_delete.go | 63 +
.../github.com/jinzhu/gorm/callback_query.go | 104 +
.../jinzhu/gorm/callback_query_preload.go | 410 +
.../jinzhu/gorm/callback_row_query.go | 36 +
.../github.com/jinzhu/gorm/callback_save.go | 170 +
.../github.com/jinzhu/gorm/callback_update.go | 121 +
vendor/github.com/jinzhu/gorm/dialect.go | 145 +
.../github.com/jinzhu/gorm/dialect_common.go | 183 +
.../github.com/jinzhu/gorm/dialect_mysql.go | 204 +
.../jinzhu/gorm/dialect_postgres.go | 143 +
.../github.com/jinzhu/gorm/dialect_sqlite3.go | 107 +
.../jinzhu/gorm/dialects/mysql/mysql.go | 3 +
.../jinzhu/gorm/dialects/postgres/postgres.go | 80 +
.../github.com/jinzhu/gorm/docker-compose.yml | 30 +
vendor/github.com/jinzhu/gorm/errors.go | 72 +
vendor/github.com/jinzhu/gorm/field.go | 66 +
vendor/github.com/jinzhu/gorm/go.mod | 13 +
vendor/github.com/jinzhu/gorm/go.sum | 131 +
vendor/github.com/jinzhu/gorm/interface.go | 24 +
.../jinzhu/gorm/join_table_handler.go | 211 +
vendor/github.com/jinzhu/gorm/logger.go | 124 +
vendor/github.com/jinzhu/gorm/main.go | 854 +
vendor/github.com/jinzhu/gorm/model.go | 14 +
vendor/github.com/jinzhu/gorm/model_struct.go | 656 +
vendor/github.com/jinzhu/gorm/naming.go | 124 +
vendor/github.com/jinzhu/gorm/scope.go | 1440 +
vendor/github.com/jinzhu/gorm/search.go | 153 +
vendor/github.com/jinzhu/gorm/test_all.sh | 5 +
vendor/github.com/jinzhu/gorm/utils.go | 226 +
vendor/github.com/jinzhu/gorm/wercker.yml | 154 +
vendor/github.com/jinzhu/inflection/LICENSE | 21 +
vendor/github.com/jinzhu/inflection/README.md | 55 +
vendor/github.com/jinzhu/inflection/go.mod | 1 +
.../jinzhu/inflection/inflections.go | 273 +
.../github.com/jinzhu/inflection/wercker.yml | 23 +
vendor/github.com/jmoiron/sqlx/.gitignore | 24 +
vendor/github.com/jmoiron/sqlx/.travis.yml | 27 +
vendor/github.com/jmoiron/sqlx/LICENSE | 23 +
vendor/github.com/jmoiron/sqlx/README.md | 187 +
vendor/github.com/jmoiron/sqlx/bind.go | 217 +
vendor/github.com/jmoiron/sqlx/doc.go | 12 +
vendor/github.com/jmoiron/sqlx/go.mod | 7 +
vendor/github.com/jmoiron/sqlx/go.sum | 6 +
vendor/github.com/jmoiron/sqlx/named.go | 356 +
.../github.com/jmoiron/sqlx/named_context.go | 132 +
.../jmoiron/sqlx/reflectx/README.md | 17 +
.../jmoiron/sqlx/reflectx/reflect.go | 441 +
vendor/github.com/jmoiron/sqlx/sqlx.go | 1045 +
.../github.com/jmoiron/sqlx/sqlx_context.go | 346 +
.../github.com/json-iterator/go/.codecov.yml | 3 +
vendor/github.com/json-iterator/go/.gitignore | 4 +
.../github.com/json-iterator/go/.travis.yml | 14 +
vendor/github.com/json-iterator/go/Gopkg.lock | 21 +
vendor/github.com/json-iterator/go/Gopkg.toml | 26 +
vendor/github.com/json-iterator/go/LICENSE | 21 +
vendor/github.com/json-iterator/go/README.md | 87 +
vendor/github.com/json-iterator/go/adapter.go | 150 +
vendor/github.com/json-iterator/go/any.go | 325 +
.../github.com/json-iterator/go/any_array.go | 278 +
.../github.com/json-iterator/go/any_bool.go | 137 +
.../github.com/json-iterator/go/any_float.go | 83 +
.../github.com/json-iterator/go/any_int32.go | 74 +
.../github.com/json-iterator/go/any_int64.go | 74 +
.../json-iterator/go/any_invalid.go | 82 +
vendor/github.com/json-iterator/go/any_nil.go | 69 +
.../github.com/json-iterator/go/any_number.go | 123 +
.../github.com/json-iterator/go/any_object.go | 374 +
vendor/github.com/json-iterator/go/any_str.go | 166 +
.../github.com/json-iterator/go/any_uint32.go | 74 +
.../github.com/json-iterator/go/any_uint64.go | 74 +
vendor/github.com/json-iterator/go/build.sh | 12 +
vendor/github.com/json-iterator/go/config.go | 375 +
.../go/fuzzy_mode_convert_table.md | 7 +
vendor/github.com/json-iterator/go/go.mod | 11 +
vendor/github.com/json-iterator/go/go.sum | 14 +
vendor/github.com/json-iterator/go/iter.go | 322 +
.../github.com/json-iterator/go/iter_array.go | 58 +
.../github.com/json-iterator/go/iter_float.go | 339 +
.../github.com/json-iterator/go/iter_int.go | 345 +
.../json-iterator/go/iter_object.go | 251 +
.../github.com/json-iterator/go/iter_skip.go | 130 +
.../json-iterator/go/iter_skip_sloppy.go | 144 +
.../json-iterator/go/iter_skip_strict.go | 99 +
.../github.com/json-iterator/go/iter_str.go | 215 +
.../github.com/json-iterator/go/jsoniter.go | 18 +
vendor/github.com/json-iterator/go/pool.go | 42 +
vendor/github.com/json-iterator/go/reflect.go | 332 +
.../json-iterator/go/reflect_array.go | 104 +
.../json-iterator/go/reflect_dynamic.go | 70 +
.../json-iterator/go/reflect_extension.go | 483 +
.../json-iterator/go/reflect_json_number.go | 112 +
.../go/reflect_json_raw_message.go | 60 +
.../json-iterator/go/reflect_map.go | 338 +
.../json-iterator/go/reflect_marshaler.go | 217 +
.../json-iterator/go/reflect_native.go | 453 +
.../json-iterator/go/reflect_optional.go | 133 +
.../json-iterator/go/reflect_slice.go | 99 +
.../go/reflect_struct_decoder.go | 1048 +
.../go/reflect_struct_encoder.go | 210 +
vendor/github.com/json-iterator/go/stream.go | 211 +
.../json-iterator/go/stream_float.go | 111 +
.../github.com/json-iterator/go/stream_int.go | 190 +
.../github.com/json-iterator/go/stream_str.go | 372 +
vendor/github.com/json-iterator/go/test.sh | 12 +
vendor/github.com/juju/errors/.gitignore | 23 +
vendor/github.com/juju/errors/LICENSE | 191 +
vendor/github.com/juju/errors/Makefile | 24 +
vendor/github.com/juju/errors/README.md | 707 +
.../github.com/juju/errors/dependencies.tsv | 5 +
vendor/github.com/juju/errors/doc.go | 81 +
vendor/github.com/juju/errors/error.go | 176 +
vendor/github.com/juju/errors/errortypes.go | 333 +
vendor/github.com/juju/errors/functions.go | 330 +
vendor/github.com/juju/errors/path.go | 19 +
vendor/github.com/kr/pretty/.gitignore | 4 +
vendor/github.com/kr/pretty/License | 21 +
vendor/github.com/kr/pretty/Readme | 9 +
vendor/github.com/kr/pretty/diff.go | 265 +
vendor/github.com/kr/pretty/formatter.go | 328 +
vendor/github.com/kr/pretty/go.mod | 3 +
vendor/github.com/kr/pretty/pretty.go | 108 +
vendor/github.com/kr/pretty/zero.go | 41 +
vendor/github.com/kr/text/License | 19 +
vendor/github.com/kr/text/Readme | 3 +
vendor/github.com/kr/text/doc.go | 3 +
vendor/github.com/kr/text/go.mod | 3 +
vendor/github.com/kr/text/indent.go | 74 +
vendor/github.com/kr/text/wrap.go | 86 +
vendor/github.com/lib/pq/.gitignore | 4 +
vendor/github.com/lib/pq/.travis.sh | 73 +
vendor/github.com/lib/pq/.travis.yml | 44 +
vendor/github.com/lib/pq/CONTRIBUTING.md | 29 +
vendor/github.com/lib/pq/LICENSE.md | 8 +
vendor/github.com/lib/pq/README.md | 95 +
vendor/github.com/lib/pq/TESTS.md | 33 +
vendor/github.com/lib/pq/array.go | 756 +
vendor/github.com/lib/pq/buf.go | 91 +
vendor/github.com/lib/pq/conn.go | 1923 +
vendor/github.com/lib/pq/conn_go18.go | 149 +
vendor/github.com/lib/pq/connector.go | 110 +
vendor/github.com/lib/pq/copy.go | 282 +
vendor/github.com/lib/pq/doc.go | 245 +
vendor/github.com/lib/pq/encode.go | 602 +
vendor/github.com/lib/pq/error.go | 515 +
vendor/github.com/lib/pq/go.mod | 1 +
vendor/github.com/lib/pq/hstore/hstore.go | 118 +
vendor/github.com/lib/pq/notify.go | 797 +
vendor/github.com/lib/pq/oid/doc.go | 6 +
vendor/github.com/lib/pq/oid/gen.go | 93 +
vendor/github.com/lib/pq/oid/types.go | 343 +
vendor/github.com/lib/pq/rows.go | 93 +
vendor/github.com/lib/pq/scram/scram.go | 264 +
vendor/github.com/lib/pq/ssl.go | 175 +
vendor/github.com/lib/pq/ssl_permissions.go | 20 +
vendor/github.com/lib/pq/ssl_windows.go | 9 +
vendor/github.com/lib/pq/url.go | 76 +
vendor/github.com/lib/pq/user_posix.go | 24 +
vendor/github.com/lib/pq/user_windows.go | 27 +
vendor/github.com/lib/pq/uuid.go | 23 +
.../github.com/modern-go/concurrent/LICENSE | 201 +
.../github.com/modern-go/concurrent/README.md | 2 +
.../modern-go/concurrent/executor.go | 7 +
.../modern-go/concurrent/go_above_19.go | 13 +
.../modern-go/concurrent/go_below_19.go | 30 +
.../concurrent/unbounded_executor.go | 99 +
.../github.com/modern-go/reflect2/.gitignore | 2 +
.../github.com/modern-go/reflect2/.travis.yml | 15 +
.../github.com/modern-go/reflect2/Gopkg.lock | 15 +
.../github.com/modern-go/reflect2/Gopkg.toml | 35 +
vendor/github.com/modern-go/reflect2/LICENSE | 201 +
.../github.com/modern-go/reflect2/README.md | 71 +
.../modern-go/reflect2/go_above_17.go | 8 +
.../modern-go/reflect2/go_above_19.go | 14 +
.../modern-go/reflect2/go_below_17.go | 9 +
.../modern-go/reflect2/go_below_19.go | 14 +
.../github.com/modern-go/reflect2/reflect2.go | 298 +
.../modern-go/reflect2/reflect2_amd64.s | 0
.../modern-go/reflect2/reflect2_kind.go | 30 +
.../modern-go/reflect2/relfect2_386.s | 0
.../modern-go/reflect2/relfect2_amd64p32.s | 0
.../modern-go/reflect2/relfect2_arm.s | 0
.../modern-go/reflect2/relfect2_arm64.s | 0
.../modern-go/reflect2/relfect2_mips64x.s | 0
.../modern-go/reflect2/relfect2_mipsx.s | 0
.../modern-go/reflect2/relfect2_ppc64x.s | 0
.../modern-go/reflect2/relfect2_s390x.s | 0
.../modern-go/reflect2/safe_field.go | 58 +
.../github.com/modern-go/reflect2/safe_map.go | 101 +
.../modern-go/reflect2/safe_slice.go | 92 +
.../modern-go/reflect2/safe_struct.go | 29 +
.../modern-go/reflect2/safe_type.go | 78 +
vendor/github.com/modern-go/reflect2/test.sh | 12 +
.../github.com/modern-go/reflect2/type_map.go | 103 +
.../modern-go/reflect2/unsafe_array.go | 65 +
.../modern-go/reflect2/unsafe_eface.go | 59 +
.../modern-go/reflect2/unsafe_field.go | 74 +
.../modern-go/reflect2/unsafe_iface.go | 64 +
.../modern-go/reflect2/unsafe_link.go | 70 +
.../modern-go/reflect2/unsafe_map.go | 138 +
.../modern-go/reflect2/unsafe_ptr.go | 46 +
.../modern-go/reflect2/unsafe_slice.go | 177 +
.../modern-go/reflect2/unsafe_struct.go | 59 +
.../modern-go/reflect2/unsafe_type.go | 85 +
.../pelletier/go-toml/.dockerignore | 2 +
.../github.com/pelletier/go-toml/.gitignore | 5 +
.../github.com/pelletier/go-toml/.travis.yml | 22 +
.../pelletier/go-toml/CONTRIBUTING.md | 132 +
.../github.com/pelletier/go-toml/Dockerfile | 10 +
vendor/github.com/pelletier/go-toml/LICENSE | 21 +
.../go-toml/PULL_REQUEST_TEMPLATE.md | 5 +
vendor/github.com/pelletier/go-toml/README.md | 145 +
.../github.com/pelletier/go-toml/appveyor.yml | 34 +
.../pelletier/go-toml/benchmark.json | 164 +
.../github.com/pelletier/go-toml/benchmark.sh | 32 +
.../pelletier/go-toml/benchmark.toml | 244 +
.../pelletier/go-toml/benchmark.yml | 121 +
vendor/github.com/pelletier/go-toml/doc.go | 23 +
.../pelletier/go-toml/example-crlf.toml | 29 +
.../github.com/pelletier/go-toml/example.toml | 29 +
vendor/github.com/pelletier/go-toml/fuzz.go | 31 +
vendor/github.com/pelletier/go-toml/fuzz.sh | 15 +
vendor/github.com/pelletier/go-toml/go.mod | 9 +
vendor/github.com/pelletier/go-toml/go.sum | 7 +
.../pelletier/go-toml/keysparsing.go | 113 +
vendor/github.com/pelletier/go-toml/lexer.go | 752 +
.../github.com/pelletier/go-toml/marshal.go | 803 +
.../marshal_OrderPreserve_Map_test.toml | 17 +
.../go-toml/marshal_OrderPreserve_test.toml | 38 +
.../pelletier/go-toml/marshal_test.toml | 38 +
vendor/github.com/pelletier/go-toml/parser.go | 442 +
.../github.com/pelletier/go-toml/position.go | 29 +
vendor/github.com/pelletier/go-toml/token.go | 144 +
vendor/github.com/pelletier/go-toml/toml.go | 393 +
.../pelletier/go-toml/tomltree_create.go | 142 +
.../pelletier/go-toml/tomltree_write.go | 434 +
vendor/github.com/pkg/errors/.gitignore | 24 +
vendor/github.com/pkg/errors/.travis.yml | 15 +
vendor/github.com/pkg/errors/LICENSE | 23 +
vendor/github.com/pkg/errors/README.md | 52 +
vendor/github.com/pkg/errors/appveyor.yml | 32 +
vendor/github.com/pkg/errors/errors.go | 282 +
vendor/github.com/pkg/errors/stack.go | 147 +
vendor/github.com/pmezard/go-difflib/LICENSE | 27 +
.../pmezard/go-difflib/difflib/difflib.go | 772 +
vendor/github.com/siddontang/go/LICENSE | 20 +
.../github.com/siddontang/go/filelock/LICENSE | 27 +
.../go/filelock/file_lock_generic.go | 17 +
.../go/filelock/file_lock_solaris.go | 43 +
.../siddontang/go/filelock/file_lock_unix.go | 51 +
.../go/filelock/file_lock_windows.go | 36 +
vendor/github.com/siddontang/go/hack/hack.go | 27 +
.../siddontang/go/ioutil2/ioutil.go | 39 +
.../siddontang/go/ioutil2/sectionwriter.go | 69 +
vendor/github.com/siddontang/go/log/doc.go | 21 +
.../siddontang/go/log/filehandler.go | 221 +
.../github.com/siddontang/go/log/handler.go | 48 +
vendor/github.com/siddontang/go/log/log.go | 343 +
.../siddontang/go/log/sockethandler.go | 65 +
vendor/github.com/siddontang/go/num/bytes.go | 67 +
vendor/github.com/siddontang/go/num/cmp.go | 161 +
vendor/github.com/siddontang/go/num/str.go | 157 +
.../github.com/siddontang/go/snappy/LICENSE | 27 +
.../github.com/siddontang/go/snappy/decode.go | 124 +
.../github.com/siddontang/go/snappy/encode.go | 174 +
.../github.com/siddontang/go/snappy/snappy.go | 38 +
.../github.com/siddontang/go/sync2/atomic.go | 146 +
.../siddontang/go/sync2/semaphore.go | 65 +
vendor/github.com/siddontang/ledisdb/LICENSE | 21 +
.../ledisdb/config/config-docker.toml | 170 +
.../siddontang/ledisdb/config/config.go | 316 +
.../siddontang/ledisdb/config/config.toml | 170 +
.../siddontang/ledisdb/ledis/batch.go | 139 +
.../siddontang/ledisdb/ledis/const.go | 150 +
.../siddontang/ledisdb/ledis/doc.go | 58 +
.../siddontang/ledisdb/ledis/dump.go | 220 +
.../siddontang/ledisdb/ledis/event.go | 136 +
.../siddontang/ledisdb/ledis/ledis.go | 248 +
.../siddontang/ledisdb/ledis/ledis_db.go | 208 +
.../siddontang/ledisdb/ledis/migrate.go | 195 +
.../siddontang/ledisdb/ledis/replication.go | 259 +
.../siddontang/ledisdb/ledis/scan.go | 402 +
.../siddontang/ledisdb/ledis/sort.go | 235 +
.../siddontang/ledisdb/ledis/t_hash.go | 556 +
.../siddontang/ledisdb/ledis/t_kv.go | 794 +
.../siddontang/ledisdb/ledis/t_list.go | 808 +
.../siddontang/ledisdb/ledis/t_set.go | 644 +
.../siddontang/ledisdb/ledis/t_ttl.go | 217 +
.../siddontang/ledisdb/ledis/t_zset.go | 1093 +
.../siddontang/ledisdb/ledis/util.go | 103 +
.../siddontang/ledisdb/rpl/file_io.go | 363 +
.../siddontang/ledisdb/rpl/file_store.go | 416 +
.../siddontang/ledisdb/rpl/file_table.go | 571 +
.../siddontang/ledisdb/rpl/goleveldb_store.go | 225 +
.../github.com/siddontang/ledisdb/rpl/log.go | 167 +
.../github.com/siddontang/ledisdb/rpl/rpl.go | 336 +
.../siddontang/ledisdb/rpl/store.go | 36 +
.../github.com/siddontang/ledisdb/store/db.go | 169 +
.../siddontang/ledisdb/store/driver/driver.go | 57 +
.../siddontang/ledisdb/store/driver/slice.go | 21 +
.../siddontang/ledisdb/store/driver/store.go | 46 +
.../ledisdb/store/goleveldb/batch.go | 39 +
.../ledisdb/store/goleveldb/const.go | 4 +
.../siddontang/ledisdb/store/goleveldb/db.go | 204 +
.../ledisdb/store/goleveldb/iterator.go | 49 +
.../ledisdb/store/goleveldb/snapshot.go | 26 +
.../siddontang/ledisdb/store/iterator.go | 334 +
.../siddontang/ledisdb/store/leveldb/batch.go | 99 +
.../siddontang/ledisdb/store/leveldb/cache.go | 20 +
.../siddontang/ledisdb/store/leveldb/const.go | 3 +
.../siddontang/ledisdb/store/leveldb/db.go | 314 +
.../ledisdb/store/leveldb/filterpolicy.go | 21 +
.../ledisdb/store/leveldb/iterator.go | 70 +
.../ledisdb/store/leveldb/leveldb_ext.cc | 95 +
.../ledisdb/store/leveldb/leveldb_ext.h | 41 +
.../ledisdb/store/leveldb/levigo-license | 7 +
.../ledisdb/store/leveldb/options.go | 126 +
.../siddontang/ledisdb/store/leveldb/slice.go | 40 +
.../ledisdb/store/leveldb/snapshot.go | 39 +
.../siddontang/ledisdb/store/leveldb/util.go | 45 +
.../siddontang/ledisdb/store/rocksdb/batch.go | 83 +
.../siddontang/ledisdb/store/rocksdb/cache.go | 20 +
.../siddontang/ledisdb/store/rocksdb/const.go | 3 +
.../siddontang/ledisdb/store/rocksdb/db.go | 342 +
.../siddontang/ledisdb/store/rocksdb/env.go | 27 +
.../ledisdb/store/rocksdb/filterpolicy.go | 21 +
.../ledisdb/store/rocksdb/iterator.go | 70 +
.../ledisdb/store/rocksdb/options.go | 229 +
.../ledisdb/store/rocksdb/rocksdb_ext.cc | 44 +
.../ledisdb/store/rocksdb/rocksdb_ext.h | 24 +
.../siddontang/ledisdb/store/rocksdb/slice.go | 41 +
.../ledisdb/store/rocksdb/snapshot.go | 39 +
.../siddontang/ledisdb/store/rocksdb/util.go | 54 +
.../siddontang/ledisdb/store/slice.go | 9 +
.../siddontang/ledisdb/store/snapshot.go | 48 +
.../siddontang/ledisdb/store/stat.go | 37 +
.../siddontang/ledisdb/store/store.go | 62 +
.../siddontang/ledisdb/store/writebatch.go | 136 +
vendor/github.com/siddontang/rdb/LICENSE | 21 +
vendor/github.com/siddontang/rdb/README.md | 3 +
vendor/github.com/siddontang/rdb/decode.go | 128 +
vendor/github.com/siddontang/rdb/digest.go | 106 +
vendor/github.com/siddontang/rdb/encode.go | 52 +
vendor/github.com/siddontang/rdb/loader.go | 112 +
vendor/github.com/siddontang/rdb/reader.go | 332 +
.../siddontang/rdb/wandoujia-license | 21 +
vendor/github.com/ssdb/gossdb/LICENSE | 12 +
vendor/github.com/ssdb/gossdb/ssdb/ssdb.go | 190 +
vendor/github.com/stretchr/testify/LICENSE | 21 +
.../testify/assert/assertion_format.go | 484 +
.../testify/assert/assertion_format.go.tmpl | 5 +
.../testify/assert/assertion_forward.go | 956 +
.../testify/assert/assertion_forward.go.tmpl | 5 +
.../stretchr/testify/assert/assertions.go | 1416 +
.../github.com/stretchr/testify/assert/doc.go | 45 +
.../stretchr/testify/assert/errors.go | 10 +
.../testify/assert/forward_assertions.go | 16 +
.../testify/assert/http_assertions.go | 143 +
.../stretchr/testify/require/doc.go | 28 +
.../testify/require/forward_requirements.go | 16 +
.../stretchr/testify/require/require.go | 1227 +
.../stretchr/testify/require/require.go.tmpl | 6 +
.../testify/require/require_forward.go | 957 +
.../testify/require/require_forward.go.tmpl | 5 +
.../stretchr/testify/require/requirements.go | 29 +
vendor/github.com/syndtr/goleveldb/LICENSE | 24 +
.../syndtr/goleveldb/leveldb/batch.go | 349 +
.../syndtr/goleveldb/leveldb/cache/cache.go | 704 +
.../syndtr/goleveldb/leveldb/cache/lru.go | 195 +
.../syndtr/goleveldb/leveldb/comparer.go | 67 +
.../leveldb/comparer/bytes_comparer.go | 51 +
.../goleveldb/leveldb/comparer/comparer.go | 57 +
.../github.com/syndtr/goleveldb/leveldb/db.go | 1179 +
.../syndtr/goleveldb/leveldb/db_compaction.go | 854 +
.../syndtr/goleveldb/leveldb/db_iter.go | 360 +
.../syndtr/goleveldb/leveldb/db_snapshot.go | 187 +
.../syndtr/goleveldb/leveldb/db_state.go | 239 +
.../goleveldb/leveldb/db_transaction.go | 329 +
.../syndtr/goleveldb/leveldb/db_util.go | 102 +
.../syndtr/goleveldb/leveldb/db_write.go | 464 +
.../syndtr/goleveldb/leveldb/doc.go | 92 +
.../syndtr/goleveldb/leveldb/errors.go | 20 +
.../syndtr/goleveldb/leveldb/errors/errors.go | 78 +
.../syndtr/goleveldb/leveldb/filter.go | 31 +
.../syndtr/goleveldb/leveldb/filter/bloom.go | 116 +
.../syndtr/goleveldb/leveldb/filter/filter.go | 60 +
.../goleveldb/leveldb/iterator/array_iter.go | 184 +
.../leveldb/iterator/indexed_iter.go | 242 +
.../syndtr/goleveldb/leveldb/iterator/iter.go | 132 +
.../goleveldb/leveldb/iterator/merged_iter.go | 304 +
.../goleveldb/leveldb/journal/journal.go | 524 +
.../syndtr/goleveldb/leveldb/key.go | 143 +
.../syndtr/goleveldb/leveldb/memdb/memdb.go | 479 +
.../syndtr/goleveldb/leveldb/opt/options.go | 697 +
.../syndtr/goleveldb/leveldb/options.go | 107 +
.../syndtr/goleveldb/leveldb/session.go | 210 +
.../goleveldb/leveldb/session_compaction.go | 302 +
.../goleveldb/leveldb/session_record.go | 323 +
.../syndtr/goleveldb/leveldb/session_util.go | 271 +
.../syndtr/goleveldb/leveldb/storage.go | 63 +
.../goleveldb/leveldb/storage/file_storage.go | 671 +
.../leveldb/storage/file_storage_nacl.go | 34 +
.../leveldb/storage/file_storage_plan9.go | 63 +
.../leveldb/storage/file_storage_solaris.go | 81 +
.../leveldb/storage/file_storage_unix.go | 98 +
.../leveldb/storage/file_storage_windows.go | 78 +
.../goleveldb/leveldb/storage/mem_storage.go | 222 +
.../goleveldb/leveldb/storage/storage.go | 187 +
.../syndtr/goleveldb/leveldb/table.go | 531 +
.../syndtr/goleveldb/leveldb/table/reader.go | 1139 +
.../syndtr/goleveldb/leveldb/table/table.go | 177 +
.../syndtr/goleveldb/leveldb/table/writer.go | 375 +
.../syndtr/goleveldb/leveldb/util.go | 98 +
.../syndtr/goleveldb/leveldb/util/buffer.go | 293 +
.../goleveldb/leveldb/util/buffer_pool.go | 239 +
.../syndtr/goleveldb/leveldb/util/crc32.go | 30 +
.../syndtr/goleveldb/leveldb/util/hash.go | 48 +
.../syndtr/goleveldb/leveldb/util/range.go | 32 +
.../syndtr/goleveldb/leveldb/util/util.go | 73 +
.../syndtr/goleveldb/leveldb/version.go | 528 +
vendor/golang.org/x/crypto/.gitattributes | 10 -
vendor/golang.org/x/crypto/.gitignore | 2 -
vendor/golang.org/x/crypto/AUTHORS | 2 +-
vendor/golang.org/x/crypto/CONTRIBUTING.md | 31 -
vendor/golang.org/x/crypto/CONTRIBUTORS | 2 +-
vendor/golang.org/x/crypto/README | 3 -
vendor/golang.org/x/crypto/acme/acme.go | 467 +-
.../x/crypto/acme/autocert/autocert.go | 738 +-
.../x/crypto/acme/autocert/cache.go | 20 +-
.../x/crypto/acme/autocert/listener.go | 157 +
.../x/crypto/acme/autocert/renewal.go | 58 +-
vendor/golang.org/x/crypto/acme/http.go | 299 +
vendor/golang.org/x/crypto/acme/jws.go | 31 +-
vendor/golang.org/x/crypto/acme/types.go | 136 +-
.../golang.org/x/crypto/acme/version_go112.go | 27 +
vendor/golang.org/x/crypto/bcrypt/base64.go | 35 -
vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 294 -
vendor/golang.org/x/crypto/blake2b/blake2b.go | 194 -
.../x/crypto/blake2b/blake2bAVX2_amd64.go | 43 -
.../x/crypto/blake2b/blake2bAVX2_amd64.s | 762 -
.../x/crypto/blake2b/blake2b_amd64.go | 25 -
.../x/crypto/blake2b/blake2b_amd64.s | 290 -
.../x/crypto/blake2b/blake2b_generic.go | 179 -
vendor/golang.org/x/crypto/blake2s/blake2s.go | 160 -
.../x/crypto/blake2s/blake2s_386.go | 36 -
.../golang.org/x/crypto/blake2s/blake2s_386.s | 460 -
.../x/crypto/blake2s/blake2s_amd64.go | 39 -
.../x/crypto/blake2s/blake2s_amd64.s | 463 -
.../x/crypto/blake2s/blake2s_generic.go | 174 -
.../x/crypto/blake2s/blake2s_ref.go | 18 -
vendor/golang.org/x/crypto/blowfish/block.go | 159 -
vendor/golang.org/x/crypto/blowfish/cipher.go | 91 -
vendor/golang.org/x/crypto/blowfish/const.go | 199 -
vendor/golang.org/x/crypto/bn256/bn256.go | 404 -
vendor/golang.org/x/crypto/bn256/constants.go | 44 -
vendor/golang.org/x/crypto/bn256/curve.go | 278 -
vendor/golang.org/x/crypto/bn256/gfp12.go | 200 -
vendor/golang.org/x/crypto/bn256/gfp2.go | 219 -
vendor/golang.org/x/crypto/bn256/gfp6.go | 296 -
vendor/golang.org/x/crypto/bn256/optate.go | 395 -
vendor/golang.org/x/crypto/bn256/twist.go | 249 -
vendor/golang.org/x/crypto/cast5/cast5.go | 526 -
.../chacha20poly1305/chacha20poly1305.go | 83 -
.../chacha20poly1305_amd64.go | 80 -
.../chacha20poly1305/chacha20poly1305_amd64.s | 2721 -
.../chacha20poly1305_generic.go | 70 -
.../chacha20poly1305_noasm.go | 15 -
.../internal/chacha20/chacha_generic.go | 199 -
vendor/golang.org/x/crypto/codereview.cfg | 1 -
.../x/crypto/curve25519/const_amd64.h | 8 -
.../x/crypto/curve25519/const_amd64.s | 20 -
.../x/crypto/curve25519/cswap_amd64.s | 88 -
.../x/crypto/curve25519/curve25519.go | 841 -
vendor/golang.org/x/crypto/curve25519/doc.go | 23 -
.../x/crypto/curve25519/freeze_amd64.s | 73 -
.../x/crypto/curve25519/ladderstep_amd64.s | 1377 -
.../x/crypto/curve25519/mont25519_amd64.go | 240 -
.../x/crypto/curve25519/mul_amd64.s | 169 -
.../x/crypto/curve25519/square_amd64.s | 132 -
vendor/golang.org/x/crypto/ed25519/ed25519.go | 181 -
.../ed25519/internal/edwards25519/const.go | 1422 -
.../internal/edwards25519/edwards25519.go | 1771 -
.../x/crypto/ed25519/testdata/sign.input.gz | Bin 50330 -> 0 bytes
vendor/golang.org/x/crypto/hkdf/hkdf.go | 75 -
vendor/golang.org/x/crypto/md4/md4.go | 118 -
vendor/golang.org/x/crypto/md4/md4block.go | 89 -
vendor/golang.org/x/crypto/nacl/box/box.go | 86 -
.../x/crypto/nacl/secretbox/secretbox.go | 149 -
vendor/golang.org/x/crypto/ocsp/ocsp.go | 771 -
.../x/crypto/openpgp/armor/armor.go | 219 -
.../x/crypto/openpgp/armor/encode.go | 160 -
.../x/crypto/openpgp/canonical_text.go | 59 -
.../x/crypto/openpgp/clearsign/clearsign.go | 376 -
.../x/crypto/openpgp/elgamal/elgamal.go | 122 -
.../x/crypto/openpgp/errors/errors.go | 72 -
vendor/golang.org/x/crypto/openpgp/keys.go | 637 -
.../x/crypto/openpgp/packet/compressed.go | 123 -
.../x/crypto/openpgp/packet/config.go | 91 -
.../x/crypto/openpgp/packet/encrypted_key.go | 199 -
.../x/crypto/openpgp/packet/literal.go | 89 -
.../x/crypto/openpgp/packet/ocfb.go | 143 -
.../openpgp/packet/one_pass_signature.go | 73 -
.../x/crypto/openpgp/packet/opaque.go | 162 -
.../x/crypto/openpgp/packet/packet.go | 537 -
.../x/crypto/openpgp/packet/private_key.go | 380 -
.../x/crypto/openpgp/packet/public_key.go | 748 -
.../x/crypto/openpgp/packet/public_key_v3.go | 279 -
.../x/crypto/openpgp/packet/reader.go | 76 -
.../x/crypto/openpgp/packet/signature.go | 731 -
.../x/crypto/openpgp/packet/signature_v3.go | 146 -
.../openpgp/packet/symmetric_key_encrypted.go | 155 -
.../openpgp/packet/symmetrically_encrypted.go | 290 -
.../x/crypto/openpgp/packet/userattribute.go | 91 -
.../x/crypto/openpgp/packet/userid.go | 160 -
vendor/golang.org/x/crypto/openpgp/read.go | 442 -
vendor/golang.org/x/crypto/openpgp/s2k/s2k.go | 273 -
vendor/golang.org/x/crypto/openpgp/write.go | 378 -
.../x/crypto/otr/libotr_test_helper.c | 197 -
vendor/golang.org/x/crypto/otr/otr.go | 1415 -
vendor/golang.org/x/crypto/otr/smp.go | 572 -
.../golang.org/x/crypto/pkcs12/bmp-string.go | 50 -
vendor/golang.org/x/crypto/pkcs12/crypto.go | 131 -
vendor/golang.org/x/crypto/pkcs12/errors.go | 23 -
.../x/crypto/pkcs12/internal/rc2/rc2.go | 274 -
vendor/golang.org/x/crypto/pkcs12/mac.go | 45 -
vendor/golang.org/x/crypto/pkcs12/pbkdf.go | 170 -
vendor/golang.org/x/crypto/pkcs12/pkcs12.go | 342 -
vendor/golang.org/x/crypto/pkcs12/safebags.go | 57 -
.../golang.org/x/crypto/poly1305/poly1305.go | 32 -
.../golang.org/x/crypto/poly1305/sum_amd64.go | 22 -
.../golang.org/x/crypto/poly1305/sum_amd64.s | 125 -
.../golang.org/x/crypto/poly1305/sum_arm.go | 22 -
vendor/golang.org/x/crypto/poly1305/sum_arm.s | 427 -
.../golang.org/x/crypto/poly1305/sum_ref.go | 141 -
.../x/crypto/ripemd160/ripemd160.go | 120 -
.../x/crypto/ripemd160/ripemd160block.go | 161 -
.../x/crypto/salsa20/salsa/hsalsa20.go | 144 -
.../x/crypto/salsa20/salsa/salsa2020_amd64.s | 889 -
.../x/crypto/salsa20/salsa/salsa208.go | 199 -
.../x/crypto/salsa20/salsa/salsa20_amd64.go | 23 -
.../x/crypto/salsa20/salsa/salsa20_ref.go | 234 -
vendor/golang.org/x/crypto/salsa20/salsa20.go | 54 -
vendor/golang.org/x/crypto/scrypt/scrypt.go | 243 -
vendor/golang.org/x/crypto/sha3/doc.go | 66 -
vendor/golang.org/x/crypto/sha3/hashes.go | 65 -
vendor/golang.org/x/crypto/sha3/keccakf.go | 412 -
.../golang.org/x/crypto/sha3/keccakf_amd64.s | 390 -
vendor/golang.org/x/crypto/sha3/register.go | 18 -
vendor/golang.org/x/crypto/sha3/sha3.go | 193 -
vendor/golang.org/x/crypto/sha3/shake.go | 60 -
.../sha3/testdata/keccakKats.json.deflate | Bin 521342 -> 0 bytes
vendor/golang.org/x/crypto/sha3/xor.go | 16 -
.../golang.org/x/crypto/sha3/xor_generic.go | 28 -
.../golang.org/x/crypto/sha3/xor_unaligned.go | 58 -
.../golang.org/x/crypto/ssh/agent/client.go | 659 -
.../golang.org/x/crypto/ssh/agent/forward.go | 103 -
.../golang.org/x/crypto/ssh/agent/keyring.go | 215 -
.../golang.org/x/crypto/ssh/agent/server.go | 451 -
vendor/golang.org/x/crypto/ssh/buffer.go | 98 -
vendor/golang.org/x/crypto/ssh/certs.go | 503 -
vendor/golang.org/x/crypto/ssh/channel.go | 633 -
vendor/golang.org/x/crypto/ssh/cipher.go | 627 -
vendor/golang.org/x/crypto/ssh/client.go | 211 -
vendor/golang.org/x/crypto/ssh/client_auth.go | 475 -
vendor/golang.org/x/crypto/ssh/common.go | 371 -
vendor/golang.org/x/crypto/ssh/connection.go | 143 -
vendor/golang.org/x/crypto/ssh/doc.go | 18 -
vendor/golang.org/x/crypto/ssh/handshake.go | 625 -
vendor/golang.org/x/crypto/ssh/kex.go | 540 -
vendor/golang.org/x/crypto/ssh/keys.go | 905 -
vendor/golang.org/x/crypto/ssh/mac.go | 61 -
vendor/golang.org/x/crypto/ssh/messages.go | 758 -
vendor/golang.org/x/crypto/ssh/mux.go | 330 -
vendor/golang.org/x/crypto/ssh/server.go | 491 -
vendor/golang.org/x/crypto/ssh/session.go | 627 -
vendor/golang.org/x/crypto/ssh/tcpip.go | 407 -
.../x/crypto/ssh/terminal/terminal.go | 951 -
.../golang.org/x/crypto/ssh/terminal/util.go | 119 -
.../x/crypto/ssh/terminal/util_bsd.go | 12 -
.../x/crypto/ssh/terminal/util_linux.go | 11 -
.../x/crypto/ssh/terminal/util_plan9.go | 58 -
.../x/crypto/ssh/terminal/util_solaris.go | 73 -
.../x/crypto/ssh/terminal/util_windows.go | 155 -
vendor/golang.org/x/crypto/ssh/test/doc.go | 7 -
.../golang.org/x/crypto/ssh/testdata/doc.go | 8 -
.../golang.org/x/crypto/ssh/testdata/keys.go | 120 -
vendor/golang.org/x/crypto/ssh/transport.go | 375 -
vendor/golang.org/x/crypto/tea/cipher.go | 109 -
vendor/golang.org/x/crypto/twofish/twofish.go | 342 -
vendor/golang.org/x/crypto/xtea/block.go | 66 -
vendor/golang.org/x/crypto/xtea/cipher.go | 82 -
vendor/golang.org/x/crypto/xts/xts.go | 138 -
vendor/golang.org/x/{time => net}/AUTHORS | 0
.../golang.org/x/{time => net}/CONTRIBUTORS | 0
vendor/golang.org/x/{time => net}/LICENSE | 0
vendor/golang.org/x/{time => net}/PATENTS | 0
vendor/golang.org/x/net/context/context.go | 405 +-
.../x/net/context/ctxhttp/cancelreq.go | 19 -
.../x/net/context/ctxhttp/cancelreq_go14.go | 23 -
.../x/net/context/ctxhttp/ctxhttp.go | 145 -
vendor/golang.org/x/net/context/go17.go | 72 +
vendor/golang.org/x/net/context/go19.go | 20 +
vendor/golang.org/x/net/context/pre_go17.go | 300 +
vendor/golang.org/x/net/context/pre_go19.go | 109 +
vendor/golang.org/x/net/html/atom/atom.go | 78 +
vendor/golang.org/x/net/html/atom/gen.go | 712 +
vendor/golang.org/x/net/html/atom/table.go | 783 +
.../golang.org/x/net/html/charset/charset.go | 257 +
vendor/golang.org/x/net/html/const.go | 112 +
vendor/golang.org/x/net/html/doc.go | 106 +
vendor/golang.org/x/net/html/doctype.go | 156 +
vendor/golang.org/x/net/html/entity.go | 2253 +
vendor/golang.org/x/net/html/escape.go | 258 +
vendor/golang.org/x/net/html/foreign.go | 226 +
vendor/golang.org/x/net/html/node.go | 220 +
vendor/golang.org/x/net/html/parse.go | 2417 +
vendor/golang.org/x/net/html/render.go | 271 +
vendor/golang.org/x/net/html/token.go | 1219 +
vendor/golang.org/x/net/idna/idna10.0.0.go | 734 +
vendor/golang.org/x/net/idna/idna9.0.0.go | 682 +
vendor/golang.org/x/net/idna/punycode.go | 203 +
vendor/golang.org/x/net/idna/tables10.0.0.go | 4559 ++
vendor/golang.org/x/net/idna/tables11.0.0.go | 4653 ++
vendor/golang.org/x/net/idna/tables9.0.0.go | 4486 ++
vendor/golang.org/x/net/idna/trie.go | 72 +
vendor/golang.org/x/net/idna/trieval.go | 119 +
vendor/golang.org/x/sys/AUTHORS | 3 +
vendor/golang.org/x/sys/CONTRIBUTORS | 3 +
vendor/golang.org/x/sys/LICENSE | 27 +
vendor/golang.org/x/sys/PATENTS | 22 +
vendor/golang.org/x/sys/unix/.gitignore | 2 +
vendor/golang.org/x/sys/unix/README.md | 173 +
.../golang.org/x/sys/unix/affinity_linux.go | 128 +
vendor/golang.org/x/sys/unix/aliases.go | 14 +
vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 17 +
vendor/golang.org/x/sys/unix/asm_darwin_386.s | 29 +
.../golang.org/x/sys/unix/asm_darwin_amd64.s | 29 +
vendor/golang.org/x/sys/unix/asm_darwin_arm.s | 30 +
.../golang.org/x/sys/unix/asm_darwin_arm64.s | 30 +
.../x/sys/unix/asm_dragonfly_amd64.s | 29 +
.../golang.org/x/sys/unix/asm_freebsd_386.s | 29 +
.../golang.org/x/sys/unix/asm_freebsd_amd64.s | 29 +
.../golang.org/x/sys/unix/asm_freebsd_arm.s | 29 +
.../golang.org/x/sys/unix/asm_freebsd_arm64.s | 29 +
vendor/golang.org/x/sys/unix/asm_linux_386.s | 65 +
.../golang.org/x/sys/unix/asm_linux_amd64.s | 57 +
vendor/golang.org/x/sys/unix/asm_linux_arm.s | 56 +
.../golang.org/x/sys/unix/asm_linux_arm64.s | 52 +
.../golang.org/x/sys/unix/asm_linux_mips64x.s | 56 +
.../golang.org/x/sys/unix/asm_linux_mipsx.s | 54 +
.../golang.org/x/sys/unix/asm_linux_ppc64x.s | 44 +
.../golang.org/x/sys/unix/asm_linux_riscv64.s | 54 +
.../golang.org/x/sys/unix/asm_linux_s390x.s | 56 +
vendor/golang.org/x/sys/unix/asm_netbsd_386.s | 29 +
.../golang.org/x/sys/unix/asm_netbsd_amd64.s | 29 +
vendor/golang.org/x/sys/unix/asm_netbsd_arm.s | 29 +
.../golang.org/x/sys/unix/asm_netbsd_arm64.s | 29 +
.../golang.org/x/sys/unix/asm_openbsd_386.s | 29 +
.../golang.org/x/sys/unix/asm_openbsd_amd64.s | 29 +
.../golang.org/x/sys/unix/asm_openbsd_arm.s | 29 +
.../golang.org/x/sys/unix/asm_openbsd_arm64.s | 29 +
.../golang.org/x/sys/unix/asm_solaris_amd64.s | 17 +
.../golang.org/x/sys/unix/bluetooth_linux.go | 35 +
vendor/golang.org/x/sys/unix/cap_freebsd.go | 195 +
.../unix/constants.go} | 14 +-
vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 27 +
vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 29 +
vendor/golang.org/x/sys/unix/dev_darwin.go | 24 +
vendor/golang.org/x/sys/unix/dev_dragonfly.go | 30 +
vendor/golang.org/x/sys/unix/dev_freebsd.go | 30 +
vendor/golang.org/x/sys/unix/dev_linux.go | 42 +
vendor/golang.org/x/sys/unix/dev_netbsd.go | 29 +
vendor/golang.org/x/sys/unix/dev_openbsd.go | 29 +
vendor/golang.org/x/sys/unix/dirent.go | 102 +
vendor/golang.org/x/sys/unix/endian_big.go | 9 +
vendor/golang.org/x/sys/unix/endian_little.go | 9 +
vendor/golang.org/x/sys/unix/env_unix.go | 31 +
.../x/sys/unix/errors_freebsd_386.go | 227 +
.../x/sys/unix/errors_freebsd_amd64.go | 227 +
.../x/sys/unix/errors_freebsd_arm.go | 226 +
vendor/golang.org/x/sys/unix/fcntl.go | 32 +
vendor/golang.org/x/sys/unix/fcntl_darwin.go | 18 +
.../x/sys/unix/fcntl_linux_32bit.go | 13 +
vendor/golang.org/x/sys/unix/gccgo.go | 62 +
vendor/golang.org/x/sys/unix/gccgo_c.c | 39 +
.../x/sys/unix/gccgo_linux_amd64.go | 20 +
vendor/golang.org/x/sys/unix/ioctl.go | 30 +
vendor/golang.org/x/sys/unix/mkall.sh | 227 +
vendor/golang.org/x/sys/unix/mkasm_darwin.go | 61 +
vendor/golang.org/x/sys/unix/mkerrors.sh | 666 +
vendor/golang.org/x/sys/unix/mkpost.go | 122 +
vendor/golang.org/x/sys/unix/mksyscall.go | 407 +
.../x/sys/unix/mksyscall_aix_ppc.go | 415 +
.../x/sys/unix/mksyscall_aix_ppc64.go | 614 +
.../x/sys/unix/mksyscall_solaris.go | 335 +
.../golang.org/x/sys/unix/mksysctl_openbsd.go | 355 +
vendor/golang.org/x/sys/unix/mksysnum.go | 190 +
vendor/golang.org/x/sys/unix/pagesize_unix.go | 15 +
.../golang.org/x/sys/unix/pledge_openbsd.go | 163 +
vendor/golang.org/x/sys/unix/race.go | 30 +
vendor/golang.org/x/sys/unix/race0.go | 25 +
.../x/sys/unix/readdirent_getdents.go | 12 +
.../x/sys/unix/readdirent_getdirentries.go | 19 +
.../golang.org/x/sys/unix/sockcmsg_linux.go | 36 +
vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 120 +
vendor/golang.org/x/sys/unix/str.go | 26 +
vendor/golang.org/x/sys/unix/syscall.go | 53 +
vendor/golang.org/x/sys/unix/syscall_aix.go | 573 +
.../golang.org/x/sys/unix/syscall_aix_ppc.go | 50 +
.../x/sys/unix/syscall_aix_ppc64.go | 81 +
vendor/golang.org/x/sys/unix/syscall_bsd.go | 618 +
.../golang.org/x/sys/unix/syscall_darwin.go | 718 +
.../x/sys/unix/syscall_darwin_386.go | 63 +
.../x/sys/unix/syscall_darwin_amd64.go | 63 +
.../x/sys/unix/syscall_darwin_arm.go | 64 +
.../x/sys/unix/syscall_darwin_arm64.go | 66 +
.../x/sys/unix/syscall_darwin_libSystem.go | 31 +
.../x/sys/unix/syscall_dragonfly.go | 556 +
.../x/sys/unix/syscall_dragonfly_amd64.go | 52 +
.../golang.org/x/sys/unix/syscall_freebsd.go | 914 +
.../x/sys/unix/syscall_freebsd_386.go | 52 +
.../x/sys/unix/syscall_freebsd_amd64.go | 52 +
.../x/sys/unix/syscall_freebsd_arm.go | 52 +
.../x/sys/unix/syscall_freebsd_arm64.go | 52 +
vendor/golang.org/x/sys/unix/syscall_linux.go | 1868 +
.../x/sys/unix/syscall_linux_386.go | 386 +
.../x/sys/unix/syscall_linux_amd64.go | 190 +
.../x/sys/unix/syscall_linux_amd64_gc.go | 13 +
.../x/sys/unix/syscall_linux_arm.go | 287 +
.../x/sys/unix/syscall_linux_arm64.go | 223 +
.../golang.org/x/sys/unix/syscall_linux_gc.go | 14 +
.../x/sys/unix/syscall_linux_gc_386.go | 16 +
.../x/sys/unix/syscall_linux_gccgo_386.go | 30 +
.../x/sys/unix/syscall_linux_gccgo_arm.go | 20 +
.../x/sys/unix/syscall_linux_mips64x.go | 222 +
.../x/sys/unix/syscall_linux_mipsx.go | 234 +
.../x/sys/unix/syscall_linux_ppc64x.go | 152 +
.../x/sys/unix/syscall_linux_riscv64.go | 226 +
.../x/sys/unix/syscall_linux_s390x.go | 338 +
.../x/sys/unix/syscall_linux_sparc64.go | 147 +
.../golang.org/x/sys/unix/syscall_netbsd.go | 655 +
.../x/sys/unix/syscall_netbsd_386.go | 33 +
.../x/sys/unix/syscall_netbsd_amd64.go | 33 +
.../x/sys/unix/syscall_netbsd_arm.go | 33 +
.../x/sys/unix/syscall_netbsd_arm64.go | 33 +
.../golang.org/x/sys/unix/syscall_openbsd.go | 449 +
.../x/sys/unix/syscall_openbsd_386.go | 37 +
.../x/sys/unix/syscall_openbsd_amd64.go | 37 +
.../x/sys/unix/syscall_openbsd_arm.go | 37 +
.../x/sys/unix/syscall_openbsd_arm64.go | 37 +
.../golang.org/x/sys/unix/syscall_solaris.go | 754 +
.../x/sys/unix/syscall_solaris_amd64.go | 23 +
vendor/golang.org/x/sys/unix/syscall_unix.go | 431 +
.../golang.org/x/sys/unix/syscall_unix_gc.go | 15 +
.../x/sys/unix/syscall_unix_gc_ppc64x.go | 24 +
vendor/golang.org/x/sys/unix/timestruct.go | 82 +
vendor/golang.org/x/sys/unix/types_aix.go | 237 +
vendor/golang.org/x/sys/unix/types_darwin.go | 283 +
.../golang.org/x/sys/unix/types_dragonfly.go | 263 +
vendor/golang.org/x/sys/unix/types_freebsd.go | 400 +
vendor/golang.org/x/sys/unix/types_netbsd.go | 290 +
vendor/golang.org/x/sys/unix/types_openbsd.go | 283 +
vendor/golang.org/x/sys/unix/types_solaris.go | 266 +
.../golang.org/x/sys/unix/unveil_openbsd.go | 42 +
vendor/golang.org/x/sys/unix/xattr_bsd.go | 240 +
.../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1374 +
.../x/sys/unix/zerrors_aix_ppc64.go | 1375 +
.../x/sys/unix/zerrors_darwin_386.go | 1783 +
.../x/sys/unix/zerrors_darwin_amd64.go | 1783 +
.../x/sys/unix/zerrors_darwin_arm.go | 1783 +
.../x/sys/unix/zerrors_darwin_arm64.go | 1783 +
.../x/sys/unix/zerrors_dragonfly_amd64.go | 1650 +
.../x/sys/unix/zerrors_freebsd_386.go | 1793 +
.../x/sys/unix/zerrors_freebsd_amd64.go | 1794 +
.../x/sys/unix/zerrors_freebsd_arm.go | 1802 +
.../x/sys/unix/zerrors_freebsd_arm64.go | 1794 +
.../x/sys/unix/zerrors_linux_386.go | 3018 ++
.../x/sys/unix/zerrors_linux_amd64.go | 3018 ++
.../x/sys/unix/zerrors_linux_arm.go | 3024 ++
.../x/sys/unix/zerrors_linux_arm64.go | 3009 ++
.../x/sys/unix/zerrors_linux_mips.go | 3025 ++
.../x/sys/unix/zerrors_linux_mips64.go | 3025 ++
.../x/sys/unix/zerrors_linux_mips64le.go | 3025 ++
.../x/sys/unix/zerrors_linux_mipsle.go | 3025 ++
.../x/sys/unix/zerrors_linux_ppc64.go | 3080 ++
.../x/sys/unix/zerrors_linux_ppc64le.go | 3080 ++
.../x/sys/unix/zerrors_linux_riscv64.go | 3005 ++
.../x/sys/unix/zerrors_linux_s390x.go | 3078 ++
.../x/sys/unix/zerrors_linux_sparc64.go | 3074 ++
.../x/sys/unix/zerrors_netbsd_386.go | 1772 +
.../x/sys/unix/zerrors_netbsd_amd64.go | 1762 +
.../x/sys/unix/zerrors_netbsd_arm.go | 1751 +
.../x/sys/unix/zerrors_netbsd_arm64.go | 1762 +
.../x/sys/unix/zerrors_openbsd_386.go | 1654 +
.../x/sys/unix/zerrors_openbsd_amd64.go | 1765 +
.../x/sys/unix/zerrors_openbsd_arm.go | 1656 +
.../x/sys/unix/zerrors_openbsd_arm64.go | 1789 +
.../x/sys/unix/zerrors_solaris_amd64.go | 1532 +
.../golang.org/x/sys/unix/zptrace386_linux.go | 80 +
.../golang.org/x/sys/unix/zptracearm_linux.go | 41 +
.../x/sys/unix/zptracemips_linux.go | 50 +
.../x/sys/unix/zptracemipsle_linux.go | 50 +
.../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 1484 +
.../x/sys/unix/zsyscall_aix_ppc64.go | 1442 +
.../x/sys/unix/zsyscall_aix_ppc64_gc.go | 1192 +
.../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 1070 +
.../x/sys/unix/zsyscall_darwin_386.1_11.go | 1810 +
.../x/sys/unix/zsyscall_darwin_386.go | 2505 +
.../x/sys/unix/zsyscall_darwin_386.s | 284 +
.../x/sys/unix/zsyscall_darwin_amd64.1_11.go | 1810 +
.../x/sys/unix/zsyscall_darwin_amd64.go | 2520 +
.../x/sys/unix/zsyscall_darwin_amd64.s | 286 +
.../x/sys/unix/zsyscall_darwin_arm.1_11.go | 1793 +
.../x/sys/unix/zsyscall_darwin_arm.go | 2483 +
.../x/sys/unix/zsyscall_darwin_arm.s | 282 +
.../x/sys/unix/zsyscall_darwin_arm64.1_11.go | 1793 +
.../x/sys/unix/zsyscall_darwin_arm64.go | 2483 +
.../x/sys/unix/zsyscall_darwin_arm64.s | 282 +
.../x/sys/unix/zsyscall_dragonfly_amd64.go | 1676 +
.../x/sys/unix/zsyscall_freebsd_386.go | 2025 +
.../x/sys/unix/zsyscall_freebsd_amd64.go | 2025 +
.../x/sys/unix/zsyscall_freebsd_arm.go | 2025 +
.../x/sys/unix/zsyscall_freebsd_arm64.go | 2025 +
.../x/sys/unix/zsyscall_linux_386.go | 2270 +
.../x/sys/unix/zsyscall_linux_amd64.go | 2437 +
.../x/sys/unix/zsyscall_linux_arm.go | 2407 +
.../x/sys/unix/zsyscall_linux_arm64.go | 2294 +
.../x/sys/unix/zsyscall_linux_mips.go | 2450 +
.../x/sys/unix/zsyscall_linux_mips64.go | 2421 +
.../x/sys/unix/zsyscall_linux_mips64le.go | 2421 +
.../x/sys/unix/zsyscall_linux_mipsle.go | 2450 +
.../x/sys/unix/zsyscall_linux_ppc64.go | 2499 +
.../x/sys/unix/zsyscall_linux_ppc64le.go | 2499 +
.../x/sys/unix/zsyscall_linux_riscv64.go | 2274 +
.../x/sys/unix/zsyscall_linux_s390x.go | 2269 +
.../x/sys/unix/zsyscall_linux_sparc64.go | 2432 +
.../x/sys/unix/zsyscall_netbsd_386.go | 1826 +
.../x/sys/unix/zsyscall_netbsd_amd64.go | 1826 +
.../x/sys/unix/zsyscall_netbsd_arm.go | 1826 +
.../x/sys/unix/zsyscall_netbsd_arm64.go | 1826 +
.../x/sys/unix/zsyscall_openbsd_386.go | 1692 +
.../x/sys/unix/zsyscall_openbsd_amd64.go | 1692 +
.../x/sys/unix/zsyscall_openbsd_arm.go | 1692 +
.../x/sys/unix/zsyscall_openbsd_arm64.go | 1692 +
.../x/sys/unix/zsyscall_solaris_amd64.go | 1953 +
.../x/sys/unix/zsysctl_openbsd_386.go | 272 +
.../x/sys/unix/zsysctl_openbsd_amd64.go | 270 +
.../x/sys/unix/zsysctl_openbsd_arm.go | 272 +
.../x/sys/unix/zsysctl_openbsd_arm64.go | 275 +
.../x/sys/unix/zsysnum_darwin_386.go | 436 +
.../x/sys/unix/zsysnum_darwin_amd64.go | 438 +
.../x/sys/unix/zsysnum_darwin_arm.go | 436 +
.../x/sys/unix/zsysnum_darwin_arm64.go | 436 +
.../x/sys/unix/zsysnum_dragonfly_amd64.go | 315 +
.../x/sys/unix/zsysnum_freebsd_386.go | 396 +
.../x/sys/unix/zsysnum_freebsd_amd64.go | 396 +
.../x/sys/unix/zsysnum_freebsd_arm.go | 396 +
.../x/sys/unix/zsysnum_freebsd_arm64.go | 396 +
.../x/sys/unix/zsysnum_linux_386.go | 432 +
.../x/sys/unix/zsysnum_linux_amd64.go | 354 +
.../x/sys/unix/zsysnum_linux_arm.go | 396 +
.../x/sys/unix/zsysnum_linux_arm64.go | 299 +
.../x/sys/unix/zsysnum_linux_mips.go | 417 +
.../x/sys/unix/zsysnum_linux_mips64.go | 347 +
.../x/sys/unix/zsysnum_linux_mips64le.go | 347 +
.../x/sys/unix/zsysnum_linux_mipsle.go | 417 +
.../x/sys/unix/zsysnum_linux_ppc64.go | 396 +
.../x/sys/unix/zsysnum_linux_ppc64le.go | 396 +
.../x/sys/unix/zsysnum_linux_riscv64.go | 298 +
.../x/sys/unix/zsysnum_linux_s390x.go | 361 +
.../x/sys/unix/zsysnum_linux_sparc64.go | 376 +
.../x/sys/unix/zsysnum_netbsd_386.go | 274 +
.../x/sys/unix/zsysnum_netbsd_amd64.go | 274 +
.../x/sys/unix/zsysnum_netbsd_arm.go | 274 +
.../x/sys/unix/zsysnum_netbsd_arm64.go | 274 +
.../x/sys/unix/zsysnum_openbsd_386.go | 218 +
.../x/sys/unix/zsysnum_openbsd_amd64.go | 218 +
.../x/sys/unix/zsysnum_openbsd_arm.go | 218 +
.../x/sys/unix/zsysnum_openbsd_arm64.go | 217 +
.../golang.org/x/sys/unix/ztypes_aix_ppc.go | 352 +
.../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 356 +
.../x/sys/unix/ztypes_darwin_386.go | 499 +
.../x/sys/unix/ztypes_darwin_amd64.go | 509 +
.../x/sys/unix/ztypes_darwin_arm.go | 500 +
.../x/sys/unix/ztypes_darwin_arm64.go | 509 +
.../x/sys/unix/ztypes_dragonfly_amd64.go | 469 +
.../x/sys/unix/ztypes_freebsd_386.go | 700 +
.../x/sys/unix/ztypes_freebsd_amd64.go | 706 +
.../x/sys/unix/ztypes_freebsd_arm.go | 683 +
.../x/sys/unix/ztypes_freebsd_arm64.go | 684 +
.../golang.org/x/sys/unix/ztypes_linux_386.go | 2523 +
.../x/sys/unix/ztypes_linux_amd64.go | 2537 +
.../golang.org/x/sys/unix/ztypes_linux_arm.go | 2514 +
.../x/sys/unix/ztypes_linux_arm64.go | 2516 +
.../x/sys/unix/ztypes_linux_mips.go | 2520 +
.../x/sys/unix/ztypes_linux_mips64.go | 2518 +
.../x/sys/unix/ztypes_linux_mips64le.go | 2518 +
.../x/sys/unix/ztypes_linux_mipsle.go | 2520 +
.../x/sys/unix/ztypes_linux_ppc64.go | 2526 +
.../x/sys/unix/ztypes_linux_ppc64le.go | 2526 +
.../x/sys/unix/ztypes_linux_riscv64.go | 2543 +
.../x/sys/unix/ztypes_linux_s390x.go | 2540 +
.../x/sys/unix/ztypes_linux_sparc64.go | 2521 +
.../x/sys/unix/ztypes_netbsd_386.go | 466 +
.../x/sys/unix/ztypes_netbsd_amd64.go | 473 +
.../x/sys/unix/ztypes_netbsd_arm.go | 471 +
.../x/sys/unix/ztypes_netbsd_arm64.go | 473 +
.../x/sys/unix/ztypes_openbsd_386.go | 571 +
.../x/sys/unix/ztypes_openbsd_amd64.go | 571 +
.../x/sys/unix/ztypes_openbsd_arm.go | 572 +
.../x/sys/unix/ztypes_openbsd_arm64.go | 565 +
.../x/sys/unix/ztypes_solaris_amd64.go | 442 +
vendor/golang.org/x/sys/windows/aliases.go | 13 +
.../x/sys/windows/asm_windows_386.s | 13 +
.../x/sys/windows/asm_windows_amd64.s | 13 +
.../x/sys/windows/asm_windows_arm.s | 11 +
.../golang.org/x/sys/windows/dll_windows.go | 378 +
.../golang.org/x/sys/windows/env_windows.go | 61 +
vendor/golang.org/x/sys/windows/eventlog.go | 20 +
.../golang.org/x/sys/windows/exec_windows.go | 97 +
.../x/sys/windows/memory_windows.go | 26 +
vendor/golang.org/x/sys/windows/mkerrors.bash | 63 +
.../x/sys/windows/mkknownfolderids.bash | 27 +
vendor/golang.org/x/sys/windows/mksyscall.go | 9 +
vendor/golang.org/x/sys/windows/race.go | 30 +
vendor/golang.org/x/sys/windows/race0.go | 25 +
.../x/sys/windows/security_windows.go | 854 +
vendor/golang.org/x/sys/windows/service.go | 229 +
vendor/golang.org/x/sys/windows/str.go | 22 +
vendor/golang.org/x/sys/windows/syscall.go | 74 +
.../x/sys/windows/syscall_windows.go | 1320 +
.../golang.org/x/sys/windows/types_windows.go | 1668 +
.../x/sys/windows/types_windows_386.go | 22 +
.../x/sys/windows/types_windows_amd64.go | 22 +
.../x/sys/windows/types_windows_arm.go | 22 +
.../x/sys/windows/zerrors_windows.go | 6853 +++
.../x/sys/windows/zknownfolderids_windows.go | 149 +
.../x/sys/windows/zsyscall_windows.go | 3337 ++
vendor/golang.org/x/text/AUTHORS | 3 +
vendor/golang.org/x/text/CONTRIBUTORS | 3 +
vendor/golang.org/x/text/LICENSE | 27 +
vendor/golang.org/x/text/PATENTS | 22 +
.../x/text/encoding/charmap/charmap.go | 249 +
.../x/text/encoding/charmap/maketables.go | 556 +
.../x/text/encoding/charmap/tables.go | 7410 +++
vendor/golang.org/x/text/encoding/encoding.go | 335 +
.../x/text/encoding/htmlindex/gen.go | 173 +
.../x/text/encoding/htmlindex/htmlindex.go | 86 +
.../x/text/encoding/htmlindex/map.go | 105 +
.../x/text/encoding/htmlindex/tables.go | 353 +
.../text/encoding/internal/identifier/gen.go | 142 +
.../internal/identifier/identifier.go | 81 +
.../text/encoding/internal/identifier/mib.go | 1619 +
.../x/text/encoding/internal/internal.go | 75 +
.../x/text/encoding/japanese/all.go | 12 +
.../x/text/encoding/japanese/eucjp.go | 225 +
.../x/text/encoding/japanese/iso2022jp.go | 299 +
.../x/text/encoding/japanese/maketables.go | 161 +
.../x/text/encoding/japanese/shiftjis.go | 189 +
.../x/text/encoding/japanese/tables.go | 26971 ++++++++++
.../x/text/encoding/korean/euckr.go | 177 +
.../x/text/encoding/korean/maketables.go | 143 +
.../x/text/encoding/korean/tables.go | 34152 ++++++++++++
.../x/text/encoding/simplifiedchinese/all.go | 12 +
.../x/text/encoding/simplifiedchinese/gbk.go | 269 +
.../encoding/simplifiedchinese/hzgb2312.go | 245 +
.../encoding/simplifiedchinese/maketables.go | 161 +
.../text/encoding/simplifiedchinese/tables.go | 43999 ++++++++++++++++
.../text/encoding/traditionalchinese/big5.go | 199 +
.../encoding/traditionalchinese/maketables.go | 140 +
.../encoding/traditionalchinese/tables.go | 37142 +++++++++++++
.../x/text/encoding/unicode/override.go | 82 +
.../x/text/encoding/unicode/unicode.go | 434 +
.../x/text/internal/language/common.go | 16 +
.../x/text/internal/language/compact.go | 29 +
.../text/internal/language/compact/compact.go | 61 +
.../x/text/internal/language/compact/gen.go | 64 +
.../internal/language/compact/gen_index.go | 113 +
.../internal/language/compact/gen_parents.go | 54 +
.../internal/language/compact/language.go | 260 +
.../text/internal/language/compact/parents.go | 120 +
.../text/internal/language/compact/tables.go | 1015 +
.../x/text/internal/language/compact/tags.go | 91 +
.../x/text/internal/language/compose.go | 167 +
.../x/text/internal/language/coverage.go | 28 +
.../x/text/internal/language/gen.go | 1520 +
.../x/text/internal/language/gen_common.go | 20 +
.../x/text/internal/language/language.go | 596 +
.../x/text/internal/language/lookup.go | 412 +
.../x/text/internal/language/match.go | 226 +
.../x/text/internal/language/parse.go | 594 +
.../x/text/internal/language/tables.go | 3431 ++
.../x/text/internal/language/tags.go | 48 +
vendor/golang.org/x/text/internal/tag/tag.go | 100 +
.../internal/utf8internal/utf8internal.go | 87 +
vendor/golang.org/x/text/language/coverage.go | 187 +
vendor/golang.org/x/text/language/doc.go | 102 +
vendor/golang.org/x/text/language/gen.go | 305 +
vendor/golang.org/x/text/language/go1_1.go | 38 +
vendor/golang.org/x/text/language/go1_2.go | 11 +
vendor/golang.org/x/text/language/language.go | 601 +
vendor/golang.org/x/text/language/match.go | 735 +
vendor/golang.org/x/text/language/parse.go | 228 +
vendor/golang.org/x/text/language/tables.go | 298 +
vendor/golang.org/x/text/language/tags.go | 145 +
vendor/golang.org/x/text/runes/cond.go | 187 +
vendor/golang.org/x/text/runes/runes.go | 355 +
.../x/text/secure/bidirule/bidirule.go | 336 +
.../secure/bidirule/bidirule10.0.0.go} | 8 +-
.../x/text/secure/bidirule/bidirule9.0.0.go | 14 +
.../golang.org/x/text/transform/transform.go | 705 +
vendor/golang.org/x/text/unicode/bidi/bidi.go | 198 +
.../golang.org/x/text/unicode/bidi/bracket.go | 335 +
vendor/golang.org/x/text/unicode/bidi/core.go | 1058 +
vendor/golang.org/x/text/unicode/bidi/gen.go | 133 +
.../x/text/unicode/bidi/gen_ranges.go | 57 +
.../x/text/unicode/bidi/gen_trieval.go | 64 +
vendor/golang.org/x/text/unicode/bidi/prop.go | 206 +
.../x/text/unicode/bidi/tables10.0.0.go | 1815 +
.../x/text/unicode/bidi/tables11.0.0.go | 1887 +
.../x/text/unicode/bidi/tables9.0.0.go | 1781 +
.../golang.org/x/text/unicode/bidi/trieval.go | 60 +
.../x/text/unicode/norm/composition.go | 512 +
.../x/text/unicode/norm/forminfo.go | 278 +
.../golang.org/x/text/unicode/norm/input.go | 109 +
vendor/golang.org/x/text/unicode/norm/iter.go | 458 +
.../x/text/unicode/norm/maketables.go | 986 +
.../x/text/unicode/norm/normalize.go | 609 +
.../x/text/unicode/norm/readwriter.go | 125 +
.../x/text/unicode/norm/tables10.0.0.go | 7657 +++
.../x/text/unicode/norm/tables11.0.0.go | 7693 +++
.../x/text/unicode/norm/tables9.0.0.go | 7637 +++
.../x/text/unicode/norm/transform.go | 88 +
vendor/golang.org/x/text/unicode/norm/trie.go | 54 +
.../golang.org/x/text/unicode/norm/triegen.go | 117 +
vendor/golang.org/x/time/CONTRIBUTING.md | 31 -
vendor/golang.org/x/time/README | 1 -
vendor/golang.org/x/time/rate/rate.go | 371 -
vendor/google.golang.org/appengine/LICENSE | 202 +
.../appengine/cloudsql/cloudsql.go | 62 +
.../appengine/cloudsql/cloudsql_classic.go | 17 +
.../appengine/cloudsql/cloudsql_vm.go | 16 +
vendor/gopkg.in/check.v1/.gitignore | 4 +
vendor/gopkg.in/check.v1/.travis.yml | 3 +
vendor/gopkg.in/check.v1/LICENSE | 25 +
vendor/gopkg.in/check.v1/README.md | 20 +
vendor/gopkg.in/check.v1/TODO | 2 +
vendor/gopkg.in/check.v1/benchmark.go | 187 +
vendor/gopkg.in/check.v1/check.go | 882 +
vendor/gopkg.in/check.v1/checkers.go | 524 +
vendor/gopkg.in/check.v1/helpers.go | 231 +
vendor/gopkg.in/check.v1/printer.go | 168 +
vendor/gopkg.in/check.v1/reporter.go | 88 +
vendor/gopkg.in/check.v1/run.go | 175 +
.../gopkg.in/dgrijalva/jwt-go.v3/.gitignore | 4 +
.../gopkg.in/dgrijalva/jwt-go.v3/.travis.yml | 13 +
vendor/gopkg.in/dgrijalva/jwt-go.v3/LICENSE | 8 +
.../dgrijalva/jwt-go.v3/MIGRATION_GUIDE.md | 97 +
vendor/gopkg.in/dgrijalva/jwt-go.v3/README.md | 100 +
.../dgrijalva/jwt-go.v3/VERSION_HISTORY.md | 118 +
vendor/gopkg.in/dgrijalva/jwt-go.v3/claims.go | 134 +
vendor/gopkg.in/dgrijalva/jwt-go.v3/doc.go | 4 +
vendor/gopkg.in/dgrijalva/jwt-go.v3/ecdsa.go | 148 +
.../dgrijalva/jwt-go.v3/ecdsa_utils.go | 67 +
vendor/gopkg.in/dgrijalva/jwt-go.v3/errors.go | 59 +
vendor/gopkg.in/dgrijalva/jwt-go.v3/hmac.go | 95 +
.../dgrijalva/jwt-go.v3/map_claims.go | 94 +
vendor/gopkg.in/dgrijalva/jwt-go.v3/none.go | 52 +
vendor/gopkg.in/dgrijalva/jwt-go.v3/parser.go | 148 +
vendor/gopkg.in/dgrijalva/jwt-go.v3/rsa.go | 101 +
.../gopkg.in/dgrijalva/jwt-go.v3/rsa_pss.go | 126 +
.../gopkg.in/dgrijalva/jwt-go.v3/rsa_utils.go | 101 +
.../dgrijalva/jwt-go.v3/signing_method.go | 35 +
vendor/gopkg.in/dgrijalva/jwt-go.v3/token.go | 108 +
vendor/modules.txt | 161 +
vendor/vendor.json | 6 -
vendor/xorm.io/builder/.drone.yml | 37 +
vendor/xorm.io/builder/LICENSE | 27 +
vendor/xorm.io/builder/README.md | 206 +
vendor/xorm.io/builder/builder.go | 394 +
vendor/xorm.io/builder/builder_delete.go | 27 +
vendor/xorm.io/builder/builder_insert.go | 89 +
vendor/xorm.io/builder/builder_limit.go | 100 +
vendor/xorm.io/builder/builder_select.go | 145 +
vendor/xorm.io/builder/builder_union.go | 47 +
vendor/xorm.io/builder/builder_update.go | 46 +
vendor/xorm.io/builder/cond.go | 74 +
vendor/xorm.io/builder/cond_and.go | 61 +
vendor/xorm.io/builder/cond_between.go | 65 +
vendor/xorm.io/builder/cond_compare.go | 160 +
vendor/xorm.io/builder/cond_eq.go | 112 +
vendor/xorm.io/builder/cond_expr.go | 39 +
vendor/xorm.io/builder/cond_if.go | 49 +
vendor/xorm.io/builder/cond_in.go | 237 +
vendor/xorm.io/builder/cond_like.go | 41 +
vendor/xorm.io/builder/cond_neq.go | 94 +
vendor/xorm.io/builder/cond_not.go | 77 +
vendor/xorm.io/builder/cond_notin.go | 234 +
vendor/xorm.io/builder/cond_null.go | 59 +
vendor/xorm.io/builder/cond_or.go | 69 +
vendor/xorm.io/builder/doc.go | 120 +
vendor/xorm.io/builder/error.go | 40 +
vendor/xorm.io/builder/go.mod | 6 +
vendor/xorm.io/builder/go.sum | 9 +
vendor/xorm.io/builder/sql.go | 156 +
vendor/xorm.io/builder/string_builder.go | 119 +
vendor/xorm.io/core/.drone.yml | 42 +
vendor/xorm.io/core/.gitignore | 1 +
vendor/xorm.io/core/LICENSE | 27 +
vendor/xorm.io/core/README.md | 118 +
vendor/xorm.io/core/benchmark.sh | 1 +
vendor/xorm.io/core/cache.go | 95 +
vendor/xorm.io/core/column.go | 166 +
vendor/xorm.io/core/converstion.go | 12 +
vendor/xorm.io/core/db.go | 227 +
vendor/xorm.io/core/dialect.go | 327 +
vendor/xorm.io/core/driver.go | 31 +
vendor/xorm.io/core/error.go | 14 +
vendor/xorm.io/core/filter.go | 84 +
vendor/xorm.io/core/go.mod | 13 +
vendor/xorm.io/core/go.sum | 23 +
vendor/xorm.io/core/ilogger.go | 37 +
vendor/xorm.io/core/index.go | 72 +
vendor/xorm.io/core/mapper.go | 258 +
vendor/xorm.io/core/pk.go | 30 +
vendor/xorm.io/core/rows.go | 338 +
vendor/xorm.io/core/scan.go | 66 +
vendor/xorm.io/core/stmt.go | 166 +
vendor/xorm.io/core/table.go | 155 +
vendor/xorm.io/core/tx.go | 153 +
vendor/xorm.io/core/type.go | 323 +
1475 files changed, 624335 insertions(+), 45264 deletions(-)
create mode 100644 go.mod
create mode 100644 go.sum
create mode 100644 vendor/github.com/bradfitz/gomemcache/LICENSE
create mode 100644 vendor/github.com/bradfitz/gomemcache/memcache/memcache.go
create mode 100644 vendor/github.com/bradfitz/gomemcache/memcache/selector.go
create mode 100644 vendor/github.com/couchbase/go-couchbase/.gitignore
create mode 100644 vendor/github.com/couchbase/go-couchbase/.travis.yml
create mode 100644 vendor/github.com/couchbase/go-couchbase/LICENSE
create mode 100644 vendor/github.com/couchbase/go-couchbase/README.markdown
create mode 100644 vendor/github.com/couchbase/go-couchbase/audit.go
create mode 100644 vendor/github.com/couchbase/go-couchbase/client.go
create mode 100644 vendor/github.com/couchbase/go-couchbase/conn_pool.go
create mode 100644 vendor/github.com/couchbase/go-couchbase/ddocs.go
create mode 100644 vendor/github.com/couchbase/go-couchbase/observe.go
create mode 100644 vendor/github.com/couchbase/go-couchbase/pools.go
create mode 100644 vendor/github.com/couchbase/go-couchbase/port_map.go
create mode 100644 vendor/github.com/couchbase/go-couchbase/streaming.go
create mode 100644 vendor/github.com/couchbase/go-couchbase/tap.go
create mode 100644 vendor/github.com/couchbase/go-couchbase/upr.go
create mode 100644 vendor/github.com/couchbase/go-couchbase/users.go
create mode 100644 vendor/github.com/couchbase/go-couchbase/util.go
create mode 100644 vendor/github.com/couchbase/go-couchbase/vbmap.go
create mode 100644 vendor/github.com/couchbase/go-couchbase/views.go
create mode 100644 vendor/github.com/couchbase/gomemcached/.gitignore
create mode 100644 vendor/github.com/couchbase/gomemcached/LICENSE
create mode 100644 vendor/github.com/couchbase/gomemcached/README.markdown
create mode 100644 vendor/github.com/couchbase/gomemcached/client/mc.go
create mode 100644 vendor/github.com/couchbase/gomemcached/client/tap_feed.go
create mode 100644 vendor/github.com/couchbase/gomemcached/client/transport.go
create mode 100644 vendor/github.com/couchbase/gomemcached/client/upr_feed.go
create mode 100644 vendor/github.com/couchbase/gomemcached/mc_constants.go
create mode 100644 vendor/github.com/couchbase/gomemcached/mc_req.go
create mode 100644 vendor/github.com/couchbase/gomemcached/mc_res.go
create mode 100644 vendor/github.com/couchbase/gomemcached/tap.go
create mode 100644 vendor/github.com/couchbase/goutils/LICENSE.md
create mode 100644 vendor/github.com/couchbase/goutils/logging/logger.go
create mode 100644 vendor/github.com/couchbase/goutils/logging/logger_golog.go
create mode 100644 vendor/github.com/couchbase/goutils/scramsha/scramsha.go
create mode 100644 vendor/github.com/couchbase/goutils/scramsha/scramsha_http.go
create mode 100644 vendor/github.com/cupcake/rdb/.gitignore
create mode 100644 vendor/github.com/cupcake/rdb/.travis.yml
create mode 100644 vendor/github.com/cupcake/rdb/LICENCE
create mode 100644 vendor/github.com/cupcake/rdb/README.md
create mode 100644 vendor/github.com/cupcake/rdb/crc64/crc64.go
create mode 100644 vendor/github.com/cupcake/rdb/decoder.go
create mode 100644 vendor/github.com/cupcake/rdb/encoder.go
create mode 100644 vendor/github.com/cupcake/rdb/nopdecoder/nop_decoder.go
create mode 100644 vendor/github.com/cupcake/rdb/slice_buffer.go
create mode 100644 vendor/github.com/davecgh/go-spew/LICENSE
create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/common.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go
create mode 100644 vendor/github.com/edsrzf/mmap-go/.gitignore
create mode 100644 vendor/github.com/edsrzf/mmap-go/LICENSE
create mode 100644 vendor/github.com/edsrzf/mmap-go/README.md
create mode 100644 vendor/github.com/edsrzf/mmap-go/mmap.go
create mode 100644 vendor/github.com/edsrzf/mmap-go/mmap_unix.go
create mode 100644 vendor/github.com/edsrzf/mmap-go/mmap_windows.go
delete mode 100644 vendor/github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs/main.go
create mode 100644 vendor/github.com/facebookgo/ensure/.travis.yml
create mode 100644 vendor/github.com/facebookgo/ensure/ensure.go
create mode 100644 vendor/github.com/facebookgo/ensure/license
create mode 100644 vendor/github.com/facebookgo/ensure/patents
create mode 100644 vendor/github.com/facebookgo/ensure/readme.md
create mode 100644 vendor/github.com/facebookgo/freeport/.travis.yml
create mode 100644 vendor/github.com/facebookgo/freeport/freeport.go
create mode 100644 vendor/github.com/facebookgo/freeport/license
create mode 100644 vendor/github.com/facebookgo/freeport/patents
create mode 100644 vendor/github.com/facebookgo/freeport/readme.md
create mode 100644 vendor/github.com/facebookgo/stack/.travis.yml
create mode 100644 vendor/github.com/facebookgo/stack/license
create mode 100644 vendor/github.com/facebookgo/stack/patents
create mode 100644 vendor/github.com/facebookgo/stack/readme.md
create mode 100644 vendor/github.com/facebookgo/stack/stack.go
create mode 100644 vendor/github.com/facebookgo/subset/.travis.yml
create mode 100644 vendor/github.com/facebookgo/subset/license
create mode 100644 vendor/github.com/facebookgo/subset/patents
create mode 100644 vendor/github.com/facebookgo/subset/readme.md
create mode 100644 vendor/github.com/facebookgo/subset/subset.go
create mode 100644 vendor/github.com/flosch/pongo2/.gitattributes
create mode 100644 vendor/github.com/flosch/pongo2/.gitignore
create mode 100644 vendor/github.com/flosch/pongo2/.travis.yml
create mode 100644 vendor/github.com/flosch/pongo2/AUTHORS
create mode 100644 vendor/github.com/flosch/pongo2/LICENSE
create mode 100644 vendor/github.com/flosch/pongo2/README.md
create mode 100644 vendor/github.com/flosch/pongo2/context.go
create mode 100644 vendor/github.com/flosch/pongo2/doc.go
create mode 100644 vendor/github.com/flosch/pongo2/error.go
create mode 100644 vendor/github.com/flosch/pongo2/filters.go
create mode 100644 vendor/github.com/flosch/pongo2/filters_builtin.go
create mode 100644 vendor/github.com/flosch/pongo2/go.mod
create mode 100644 vendor/github.com/flosch/pongo2/helpers.go
create mode 100644 vendor/github.com/flosch/pongo2/lexer.go
create mode 100644 vendor/github.com/flosch/pongo2/nodes.go
create mode 100644 vendor/github.com/flosch/pongo2/nodes_html.go
create mode 100644 vendor/github.com/flosch/pongo2/nodes_wrapper.go
create mode 100644 vendor/github.com/flosch/pongo2/options.go
create mode 100644 vendor/github.com/flosch/pongo2/parser.go
create mode 100644 vendor/github.com/flosch/pongo2/parser_document.go
create mode 100644 vendor/github.com/flosch/pongo2/parser_expression.go
create mode 100644 vendor/github.com/flosch/pongo2/pongo2.go
create mode 100644 vendor/github.com/flosch/pongo2/tags.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_autoescape.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_block.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_comment.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_cycle.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_extends.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_filter.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_firstof.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_for.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_if.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_ifchanged.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_ifequal.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_ifnotequal.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_import.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_include.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_lorem.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_macro.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_now.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_set.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_spaceless.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_ssi.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_templatetag.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_widthratio.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_with.go
create mode 100644 vendor/github.com/flosch/pongo2/template.go
create mode 100644 vendor/github.com/flosch/pongo2/template_loader.go
create mode 100644 vendor/github.com/flosch/pongo2/template_sets.go
create mode 100644 vendor/github.com/flosch/pongo2/value.go
create mode 100644 vendor/github.com/flosch/pongo2/variable.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/.editorconfig
create mode 100644 vendor/github.com/fsnotify/fsnotify/.gitignore
create mode 100644 vendor/github.com/fsnotify/fsnotify/.travis.yml
create mode 100644 vendor/github.com/fsnotify/fsnotify/AUTHORS
create mode 100644 vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
create mode 100644 vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
create mode 100644 vendor/github.com/fsnotify/fsnotify/LICENSE
create mode 100644 vendor/github.com/fsnotify/fsnotify/README.md
create mode 100644 vendor/github.com/fsnotify/fsnotify/fen.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/fsnotify.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/inotify.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/inotify_poller.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/kqueue.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/windows.go
create mode 100644 vendor/github.com/garyburd/redigo/LICENSE
create mode 100644 vendor/github.com/garyburd/redigo/internal/commandinfo.go
create mode 100644 vendor/github.com/garyburd/redigo/redis/conn.go
create mode 100644 vendor/github.com/garyburd/redigo/redis/doc.go
create mode 100644 vendor/github.com/garyburd/redigo/redis/go16.go
create mode 100644 vendor/github.com/garyburd/redigo/redis/go17.go
create mode 100644 vendor/github.com/garyburd/redigo/redis/go18.go
create mode 100644 vendor/github.com/garyburd/redigo/redis/log.go
create mode 100644 vendor/github.com/garyburd/redigo/redis/pool.go
create mode 100644 vendor/github.com/garyburd/redigo/redis/pool17.go
create mode 100644 vendor/github.com/garyburd/redigo/redis/pubsub.go
create mode 100644 vendor/github.com/garyburd/redigo/redis/redis.go
create mode 100644 vendor/github.com/garyburd/redigo/redis/reply.go
create mode 100644 vendor/github.com/garyburd/redigo/redis/scan.go
create mode 100644 vendor/github.com/garyburd/redigo/redis/script.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/.gitignore
create mode 100644 vendor/github.com/go-sql-driver/mysql/.travis.yml
create mode 100644 vendor/github.com/go-sql-driver/mysql/AUTHORS
create mode 100644 vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
create mode 100644 vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md
create mode 100644 vendor/github.com/go-sql-driver/mysql/LICENSE
create mode 100644 vendor/github.com/go-sql-driver/mysql/README.md
create mode 100644 vendor/github.com/go-sql-driver/mysql/appengine.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/auth.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/buffer.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/collations.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/connection.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/connection_go18.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/const.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/driver.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/dsn.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/errors.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/fields.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/infile.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/packets.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/result.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/rows.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/statement.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/transaction.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/utils.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/utils_go17.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/utils_go18.go
create mode 100644 vendor/github.com/go-xorm/xorm/.drone.yml
create mode 100644 vendor/github.com/go-xorm/xorm/.gitignore
create mode 100644 vendor/github.com/go-xorm/xorm/CONTRIBUTING.md
create mode 100644 vendor/github.com/go-xorm/xorm/LICENSE
create mode 100644 vendor/github.com/go-xorm/xorm/README.md
create mode 100644 vendor/github.com/go-xorm/xorm/README_CN.md
create mode 100644 vendor/github.com/go-xorm/xorm/cache_lru.go
create mode 100644 vendor/github.com/go-xorm/xorm/cache_memory_store.go
create mode 100644 vendor/github.com/go-xorm/xorm/context_cache.go
create mode 100644 vendor/github.com/go-xorm/xorm/convert.go
create mode 100644 vendor/github.com/go-xorm/xorm/dialect_mssql.go
create mode 100644 vendor/github.com/go-xorm/xorm/dialect_mysql.go
create mode 100644 vendor/github.com/go-xorm/xorm/dialect_oracle.go
create mode 100644 vendor/github.com/go-xorm/xorm/dialect_postgres.go
create mode 100644 vendor/github.com/go-xorm/xorm/dialect_sqlite3.go
create mode 100644 vendor/github.com/go-xorm/xorm/doc.go
create mode 100644 vendor/github.com/go-xorm/xorm/engine.go
create mode 100644 vendor/github.com/go-xorm/xorm/engine_cond.go
create mode 100644 vendor/github.com/go-xorm/xorm/engine_context.go
create mode 100644 vendor/github.com/go-xorm/xorm/engine_group.go
create mode 100644 vendor/github.com/go-xorm/xorm/engine_group_policy.go
create mode 100644 vendor/github.com/go-xorm/xorm/engine_table.go
create mode 100644 vendor/github.com/go-xorm/xorm/error.go
create mode 100644 vendor/github.com/go-xorm/xorm/gen_reserved.sh
create mode 100644 vendor/github.com/go-xorm/xorm/go.mod
create mode 100644 vendor/github.com/go-xorm/xorm/go.sum
create mode 100644 vendor/github.com/go-xorm/xorm/helpers.go
create mode 100644 vendor/github.com/go-xorm/xorm/helpler_time.go
create mode 100644 vendor/github.com/go-xorm/xorm/interface.go
create mode 100644 vendor/github.com/go-xorm/xorm/json.go
create mode 100644 vendor/github.com/go-xorm/xorm/logger.go
create mode 100644 vendor/github.com/go-xorm/xorm/pg_reserved.txt
create mode 100644 vendor/github.com/go-xorm/xorm/processors.go
create mode 100644 vendor/github.com/go-xorm/xorm/rows.go
create mode 100644 vendor/github.com/go-xorm/xorm/session.go
create mode 100644 vendor/github.com/go-xorm/xorm/session_cols.go
create mode 100644 vendor/github.com/go-xorm/xorm/session_cond.go
create mode 100644 vendor/github.com/go-xorm/xorm/session_context.go
create mode 100644 vendor/github.com/go-xorm/xorm/session_convert.go
create mode 100644 vendor/github.com/go-xorm/xorm/session_delete.go
create mode 100644 vendor/github.com/go-xorm/xorm/session_exist.go
create mode 100644 vendor/github.com/go-xorm/xorm/session_find.go
create mode 100644 vendor/github.com/go-xorm/xorm/session_get.go
create mode 100644 vendor/github.com/go-xorm/xorm/session_insert.go
create mode 100644 vendor/github.com/go-xorm/xorm/session_iterate.go
create mode 100644 vendor/github.com/go-xorm/xorm/session_query.go
create mode 100644 vendor/github.com/go-xorm/xorm/session_raw.go
create mode 100644 vendor/github.com/go-xorm/xorm/session_schema.go
create mode 100644 vendor/github.com/go-xorm/xorm/session_stats.go
create mode 100644 vendor/github.com/go-xorm/xorm/session_tx.go
create mode 100644 vendor/github.com/go-xorm/xorm/session_update.go
create mode 100644 vendor/github.com/go-xorm/xorm/statement.go
create mode 100644 vendor/github.com/go-xorm/xorm/syslogger.go
create mode 100644 vendor/github.com/go-xorm/xorm/tag.go
create mode 100644 vendor/github.com/go-xorm/xorm/test_mssql.sh
create mode 100644 vendor/github.com/go-xorm/xorm/test_mssql_cache.sh
create mode 100644 vendor/github.com/go-xorm/xorm/test_mymysql.sh
create mode 100644 vendor/github.com/go-xorm/xorm/test_mymysql_cache.sh
create mode 100644 vendor/github.com/go-xorm/xorm/test_mysql.sh
create mode 100644 vendor/github.com/go-xorm/xorm/test_mysql_cache.sh
create mode 100644 vendor/github.com/go-xorm/xorm/test_postgres.sh
create mode 100644 vendor/github.com/go-xorm/xorm/test_postgres_cache.sh
create mode 100644 vendor/github.com/go-xorm/xorm/test_sqlite.sh
create mode 100644 vendor/github.com/go-xorm/xorm/test_sqlite_cache.sh
create mode 100644 vendor/github.com/go-xorm/xorm/test_tidb.sh
create mode 100644 vendor/github.com/go-xorm/xorm/transaction.go
create mode 100644 vendor/github.com/go-xorm/xorm/types.go
create mode 100644 vendor/github.com/go-xorm/xorm/xorm.go
create mode 100644 vendor/github.com/golang/snappy/.gitignore
create mode 100644 vendor/github.com/golang/snappy/AUTHORS
create mode 100644 vendor/github.com/golang/snappy/CONTRIBUTORS
create mode 100644 vendor/github.com/golang/snappy/LICENSE
create mode 100644 vendor/github.com/golang/snappy/README
create mode 100644 vendor/github.com/golang/snappy/decode.go
create mode 100644 vendor/github.com/golang/snappy/decode_amd64.go
create mode 100644 vendor/github.com/golang/snappy/decode_amd64.s
create mode 100644 vendor/github.com/golang/snappy/decode_other.go
create mode 100644 vendor/github.com/golang/snappy/encode.go
create mode 100644 vendor/github.com/golang/snappy/encode_amd64.go
create mode 100644 vendor/github.com/golang/snappy/encode_amd64.s
create mode 100644 vendor/github.com/golang/snappy/encode_other.go
create mode 100644 vendor/github.com/golang/snappy/snappy.go
create mode 100644 vendor/github.com/gorilla/websocket/.gitignore
create mode 100644 vendor/github.com/gorilla/websocket/.travis.yml
create mode 100644 vendor/github.com/gorilla/websocket/AUTHORS
create mode 100644 vendor/github.com/gorilla/websocket/LICENSE
create mode 100644 vendor/github.com/gorilla/websocket/README.md
create mode 100644 vendor/github.com/gorilla/websocket/client.go
create mode 100644 vendor/github.com/gorilla/websocket/client_clone.go
create mode 100644 vendor/github.com/gorilla/websocket/client_clone_legacy.go
create mode 100644 vendor/github.com/gorilla/websocket/compression.go
create mode 100644 vendor/github.com/gorilla/websocket/conn.go
create mode 100644 vendor/github.com/gorilla/websocket/conn_write.go
create mode 100644 vendor/github.com/gorilla/websocket/conn_write_legacy.go
create mode 100644 vendor/github.com/gorilla/websocket/doc.go
create mode 100644 vendor/github.com/gorilla/websocket/json.go
create mode 100644 vendor/github.com/gorilla/websocket/mask.go
create mode 100644 vendor/github.com/gorilla/websocket/mask_safe.go
create mode 100644 vendor/github.com/gorilla/websocket/prepared.go
create mode 100644 vendor/github.com/gorilla/websocket/proxy.go
create mode 100644 vendor/github.com/gorilla/websocket/server.go
create mode 100644 vendor/github.com/gorilla/websocket/trace.go
create mode 100644 vendor/github.com/gorilla/websocket/trace_17.go
create mode 100644 vendor/github.com/gorilla/websocket/util.go
create mode 100644 vendor/github.com/gorilla/websocket/x_net_proxy.go
create mode 100644 vendor/github.com/henrylee2cn/goutil/.gitignore
create mode 100644 vendor/github.com/henrylee2cn/goutil/README.md
create mode 100644 vendor/github.com/henrylee2cn/goutil/currip.go
create mode 100644 vendor/github.com/henrylee2cn/goutil/doc.go
create mode 100644 vendor/github.com/henrylee2cn/goutil/encrypt.go
create mode 100644 vendor/github.com/henrylee2cn/goutil/errors/errors.go
create mode 100644 vendor/github.com/henrylee2cn/goutil/exported.go
create mode 100644 vendor/github.com/henrylee2cn/goutil/file.go
create mode 100644 vendor/github.com/henrylee2cn/goutil/gopath.go
create mode 100644 vendor/github.com/henrylee2cn/goutil/gotest.go
create mode 100644 vendor/github.com/henrylee2cn/goutil/js_url.go
create mode 100644 vendor/github.com/henrylee2cn/goutil/map.go
create mode 100644 vendor/github.com/henrylee2cn/goutil/other.go
create mode 100644 vendor/github.com/henrylee2cn/goutil/pid_file.go
create mode 100644 vendor/github.com/henrylee2cn/goutil/random.go
create mode 100644 vendor/github.com/henrylee2cn/goutil/sets.go
create mode 100644 vendor/github.com/henrylee2cn/goutil/string.go
create mode 100644 vendor/github.com/henrylee2cn/goutil/targz.go
create mode 100644 vendor/github.com/henrylee2cn/goutil/trace.go
create mode 100644 vendor/github.com/henrylee2cn/ini/.gitignore
create mode 100644 vendor/github.com/henrylee2cn/ini/.travis.yml
create mode 100644 vendor/github.com/henrylee2cn/ini/LICENSE
create mode 100644 vendor/github.com/henrylee2cn/ini/Makefile
create mode 100644 vendor/github.com/henrylee2cn/ini/README.md
create mode 100644 vendor/github.com/henrylee2cn/ini/README_ZH.md
create mode 100644 vendor/github.com/henrylee2cn/ini/error.go
create mode 100644 vendor/github.com/henrylee2cn/ini/helper.go
create mode 100644 vendor/github.com/henrylee2cn/ini/ini.go
create mode 100644 vendor/github.com/henrylee2cn/ini/key.go
create mode 100644 vendor/github.com/henrylee2cn/ini/parser.go
create mode 100644 vendor/github.com/henrylee2cn/ini/section.go
create mode 100644 vendor/github.com/henrylee2cn/ini/struct.go
create mode 100644 vendor/github.com/jinzhu/gorm/.gitignore
create mode 100644 vendor/github.com/jinzhu/gorm/License
create mode 100644 vendor/github.com/jinzhu/gorm/README.md
create mode 100644 vendor/github.com/jinzhu/gorm/association.go
create mode 100644 vendor/github.com/jinzhu/gorm/callback.go
create mode 100644 vendor/github.com/jinzhu/gorm/callback_create.go
create mode 100644 vendor/github.com/jinzhu/gorm/callback_delete.go
create mode 100644 vendor/github.com/jinzhu/gorm/callback_query.go
create mode 100644 vendor/github.com/jinzhu/gorm/callback_query_preload.go
create mode 100644 vendor/github.com/jinzhu/gorm/callback_row_query.go
create mode 100644 vendor/github.com/jinzhu/gorm/callback_save.go
create mode 100644 vendor/github.com/jinzhu/gorm/callback_update.go
create mode 100644 vendor/github.com/jinzhu/gorm/dialect.go
create mode 100644 vendor/github.com/jinzhu/gorm/dialect_common.go
create mode 100644 vendor/github.com/jinzhu/gorm/dialect_mysql.go
create mode 100644 vendor/github.com/jinzhu/gorm/dialect_postgres.go
create mode 100644 vendor/github.com/jinzhu/gorm/dialect_sqlite3.go
create mode 100644 vendor/github.com/jinzhu/gorm/dialects/mysql/mysql.go
create mode 100644 vendor/github.com/jinzhu/gorm/dialects/postgres/postgres.go
create mode 100644 vendor/github.com/jinzhu/gorm/docker-compose.yml
create mode 100644 vendor/github.com/jinzhu/gorm/errors.go
create mode 100644 vendor/github.com/jinzhu/gorm/field.go
create mode 100644 vendor/github.com/jinzhu/gorm/go.mod
create mode 100644 vendor/github.com/jinzhu/gorm/go.sum
create mode 100644 vendor/github.com/jinzhu/gorm/interface.go
create mode 100644 vendor/github.com/jinzhu/gorm/join_table_handler.go
create mode 100644 vendor/github.com/jinzhu/gorm/logger.go
create mode 100644 vendor/github.com/jinzhu/gorm/main.go
create mode 100644 vendor/github.com/jinzhu/gorm/model.go
create mode 100644 vendor/github.com/jinzhu/gorm/model_struct.go
create mode 100644 vendor/github.com/jinzhu/gorm/naming.go
create mode 100644 vendor/github.com/jinzhu/gorm/scope.go
create mode 100644 vendor/github.com/jinzhu/gorm/search.go
create mode 100644 vendor/github.com/jinzhu/gorm/test_all.sh
create mode 100644 vendor/github.com/jinzhu/gorm/utils.go
create mode 100644 vendor/github.com/jinzhu/gorm/wercker.yml
create mode 100644 vendor/github.com/jinzhu/inflection/LICENSE
create mode 100644 vendor/github.com/jinzhu/inflection/README.md
create mode 100644 vendor/github.com/jinzhu/inflection/go.mod
create mode 100644 vendor/github.com/jinzhu/inflection/inflections.go
create mode 100644 vendor/github.com/jinzhu/inflection/wercker.yml
create mode 100644 vendor/github.com/jmoiron/sqlx/.gitignore
create mode 100644 vendor/github.com/jmoiron/sqlx/.travis.yml
create mode 100644 vendor/github.com/jmoiron/sqlx/LICENSE
create mode 100644 vendor/github.com/jmoiron/sqlx/README.md
create mode 100644 vendor/github.com/jmoiron/sqlx/bind.go
create mode 100644 vendor/github.com/jmoiron/sqlx/doc.go
create mode 100644 vendor/github.com/jmoiron/sqlx/go.mod
create mode 100644 vendor/github.com/jmoiron/sqlx/go.sum
create mode 100644 vendor/github.com/jmoiron/sqlx/named.go
create mode 100644 vendor/github.com/jmoiron/sqlx/named_context.go
create mode 100644 vendor/github.com/jmoiron/sqlx/reflectx/README.md
create mode 100644 vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
create mode 100644 vendor/github.com/jmoiron/sqlx/sqlx.go
create mode 100644 vendor/github.com/jmoiron/sqlx/sqlx_context.go
create mode 100644 vendor/github.com/json-iterator/go/.codecov.yml
create mode 100644 vendor/github.com/json-iterator/go/.gitignore
create mode 100644 vendor/github.com/json-iterator/go/.travis.yml
create mode 100644 vendor/github.com/json-iterator/go/Gopkg.lock
create mode 100644 vendor/github.com/json-iterator/go/Gopkg.toml
create mode 100644 vendor/github.com/json-iterator/go/LICENSE
create mode 100644 vendor/github.com/json-iterator/go/README.md
create mode 100644 vendor/github.com/json-iterator/go/adapter.go
create mode 100644 vendor/github.com/json-iterator/go/any.go
create mode 100644 vendor/github.com/json-iterator/go/any_array.go
create mode 100644 vendor/github.com/json-iterator/go/any_bool.go
create mode 100644 vendor/github.com/json-iterator/go/any_float.go
create mode 100644 vendor/github.com/json-iterator/go/any_int32.go
create mode 100644 vendor/github.com/json-iterator/go/any_int64.go
create mode 100644 vendor/github.com/json-iterator/go/any_invalid.go
create mode 100644 vendor/github.com/json-iterator/go/any_nil.go
create mode 100644 vendor/github.com/json-iterator/go/any_number.go
create mode 100644 vendor/github.com/json-iterator/go/any_object.go
create mode 100644 vendor/github.com/json-iterator/go/any_str.go
create mode 100644 vendor/github.com/json-iterator/go/any_uint32.go
create mode 100644 vendor/github.com/json-iterator/go/any_uint64.go
create mode 100644 vendor/github.com/json-iterator/go/build.sh
create mode 100644 vendor/github.com/json-iterator/go/config.go
create mode 100644 vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
create mode 100644 vendor/github.com/json-iterator/go/go.mod
create mode 100644 vendor/github.com/json-iterator/go/go.sum
create mode 100644 vendor/github.com/json-iterator/go/iter.go
create mode 100644 vendor/github.com/json-iterator/go/iter_array.go
create mode 100644 vendor/github.com/json-iterator/go/iter_float.go
create mode 100644 vendor/github.com/json-iterator/go/iter_int.go
create mode 100644 vendor/github.com/json-iterator/go/iter_object.go
create mode 100644 vendor/github.com/json-iterator/go/iter_skip.go
create mode 100644 vendor/github.com/json-iterator/go/iter_skip_sloppy.go
create mode 100644 vendor/github.com/json-iterator/go/iter_skip_strict.go
create mode 100644 vendor/github.com/json-iterator/go/iter_str.go
create mode 100644 vendor/github.com/json-iterator/go/jsoniter.go
create mode 100644 vendor/github.com/json-iterator/go/pool.go
create mode 100644 vendor/github.com/json-iterator/go/reflect.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_array.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_dynamic.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_extension.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_json_number.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_json_raw_message.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_map.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_marshaler.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_native.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_optional.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_slice.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_struct_decoder.go
create mode 100644 vendor/github.com/json-iterator/go/reflect_struct_encoder.go
create mode 100644 vendor/github.com/json-iterator/go/stream.go
create mode 100644 vendor/github.com/json-iterator/go/stream_float.go
create mode 100644 vendor/github.com/json-iterator/go/stream_int.go
create mode 100644 vendor/github.com/json-iterator/go/stream_str.go
create mode 100644 vendor/github.com/json-iterator/go/test.sh
create mode 100644 vendor/github.com/juju/errors/.gitignore
create mode 100644 vendor/github.com/juju/errors/LICENSE
create mode 100644 vendor/github.com/juju/errors/Makefile
create mode 100644 vendor/github.com/juju/errors/README.md
create mode 100644 vendor/github.com/juju/errors/dependencies.tsv
create mode 100644 vendor/github.com/juju/errors/doc.go
create mode 100644 vendor/github.com/juju/errors/error.go
create mode 100644 vendor/github.com/juju/errors/errortypes.go
create mode 100644 vendor/github.com/juju/errors/functions.go
create mode 100644 vendor/github.com/juju/errors/path.go
create mode 100644 vendor/github.com/kr/pretty/.gitignore
create mode 100644 vendor/github.com/kr/pretty/License
create mode 100644 vendor/github.com/kr/pretty/Readme
create mode 100644 vendor/github.com/kr/pretty/diff.go
create mode 100644 vendor/github.com/kr/pretty/formatter.go
create mode 100644 vendor/github.com/kr/pretty/go.mod
create mode 100644 vendor/github.com/kr/pretty/pretty.go
create mode 100644 vendor/github.com/kr/pretty/zero.go
create mode 100644 vendor/github.com/kr/text/License
create mode 100644 vendor/github.com/kr/text/Readme
create mode 100644 vendor/github.com/kr/text/doc.go
create mode 100644 vendor/github.com/kr/text/go.mod
create mode 100644 vendor/github.com/kr/text/indent.go
create mode 100644 vendor/github.com/kr/text/wrap.go
create mode 100644 vendor/github.com/lib/pq/.gitignore
create mode 100644 vendor/github.com/lib/pq/.travis.sh
create mode 100644 vendor/github.com/lib/pq/.travis.yml
create mode 100644 vendor/github.com/lib/pq/CONTRIBUTING.md
create mode 100644 vendor/github.com/lib/pq/LICENSE.md
create mode 100644 vendor/github.com/lib/pq/README.md
create mode 100644 vendor/github.com/lib/pq/TESTS.md
create mode 100644 vendor/github.com/lib/pq/array.go
create mode 100644 vendor/github.com/lib/pq/buf.go
create mode 100644 vendor/github.com/lib/pq/conn.go
create mode 100644 vendor/github.com/lib/pq/conn_go18.go
create mode 100644 vendor/github.com/lib/pq/connector.go
create mode 100644 vendor/github.com/lib/pq/copy.go
create mode 100644 vendor/github.com/lib/pq/doc.go
create mode 100644 vendor/github.com/lib/pq/encode.go
create mode 100644 vendor/github.com/lib/pq/error.go
create mode 100644 vendor/github.com/lib/pq/go.mod
create mode 100644 vendor/github.com/lib/pq/hstore/hstore.go
create mode 100644 vendor/github.com/lib/pq/notify.go
create mode 100644 vendor/github.com/lib/pq/oid/doc.go
create mode 100644 vendor/github.com/lib/pq/oid/gen.go
create mode 100644 vendor/github.com/lib/pq/oid/types.go
create mode 100644 vendor/github.com/lib/pq/rows.go
create mode 100644 vendor/github.com/lib/pq/scram/scram.go
create mode 100644 vendor/github.com/lib/pq/ssl.go
create mode 100644 vendor/github.com/lib/pq/ssl_permissions.go
create mode 100644 vendor/github.com/lib/pq/ssl_windows.go
create mode 100644 vendor/github.com/lib/pq/url.go
create mode 100644 vendor/github.com/lib/pq/user_posix.go
create mode 100644 vendor/github.com/lib/pq/user_windows.go
create mode 100644 vendor/github.com/lib/pq/uuid.go
create mode 100644 vendor/github.com/modern-go/concurrent/LICENSE
create mode 100644 vendor/github.com/modern-go/concurrent/README.md
create mode 100644 vendor/github.com/modern-go/concurrent/executor.go
create mode 100644 vendor/github.com/modern-go/concurrent/go_above_19.go
create mode 100644 vendor/github.com/modern-go/concurrent/go_below_19.go
create mode 100644 vendor/github.com/modern-go/concurrent/unbounded_executor.go
create mode 100644 vendor/github.com/modern-go/reflect2/.gitignore
create mode 100644 vendor/github.com/modern-go/reflect2/.travis.yml
create mode 100644 vendor/github.com/modern-go/reflect2/Gopkg.lock
create mode 100644 vendor/github.com/modern-go/reflect2/Gopkg.toml
create mode 100644 vendor/github.com/modern-go/reflect2/LICENSE
create mode 100644 vendor/github.com/modern-go/reflect2/README.md
create mode 100644 vendor/github.com/modern-go/reflect2/go_above_17.go
create mode 100644 vendor/github.com/modern-go/reflect2/go_above_19.go
create mode 100644 vendor/github.com/modern-go/reflect2/go_below_17.go
create mode 100644 vendor/github.com/modern-go/reflect2/go_below_19.go
create mode 100644 vendor/github.com/modern-go/reflect2/reflect2.go
create mode 100644 vendor/github.com/modern-go/reflect2/reflect2_amd64.s
create mode 100644 vendor/github.com/modern-go/reflect2/reflect2_kind.go
create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_386.s
create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s
create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_arm.s
create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_arm64.s
create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_mips64x.s
create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_mipsx.s
create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s
create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_s390x.s
create mode 100644 vendor/github.com/modern-go/reflect2/safe_field.go
create mode 100644 vendor/github.com/modern-go/reflect2/safe_map.go
create mode 100644 vendor/github.com/modern-go/reflect2/safe_slice.go
create mode 100644 vendor/github.com/modern-go/reflect2/safe_struct.go
create mode 100644 vendor/github.com/modern-go/reflect2/safe_type.go
create mode 100644 vendor/github.com/modern-go/reflect2/test.sh
create mode 100644 vendor/github.com/modern-go/reflect2/type_map.go
create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_array.go
create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_eface.go
create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_field.go
create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_iface.go
create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_link.go
create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_map.go
create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_ptr.go
create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_slice.go
create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_struct.go
create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_type.go
create mode 100644 vendor/github.com/pelletier/go-toml/.dockerignore
create mode 100644 vendor/github.com/pelletier/go-toml/.gitignore
create mode 100644 vendor/github.com/pelletier/go-toml/.travis.yml
create mode 100644 vendor/github.com/pelletier/go-toml/CONTRIBUTING.md
create mode 100644 vendor/github.com/pelletier/go-toml/Dockerfile
create mode 100644 vendor/github.com/pelletier/go-toml/LICENSE
create mode 100644 vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md
create mode 100644 vendor/github.com/pelletier/go-toml/README.md
create mode 100644 vendor/github.com/pelletier/go-toml/appveyor.yml
create mode 100644 vendor/github.com/pelletier/go-toml/benchmark.json
create mode 100644 vendor/github.com/pelletier/go-toml/benchmark.sh
create mode 100644 vendor/github.com/pelletier/go-toml/benchmark.toml
create mode 100644 vendor/github.com/pelletier/go-toml/benchmark.yml
create mode 100644 vendor/github.com/pelletier/go-toml/doc.go
create mode 100644 vendor/github.com/pelletier/go-toml/example-crlf.toml
create mode 100644 vendor/github.com/pelletier/go-toml/example.toml
create mode 100644 vendor/github.com/pelletier/go-toml/fuzz.go
create mode 100644 vendor/github.com/pelletier/go-toml/fuzz.sh
create mode 100644 vendor/github.com/pelletier/go-toml/go.mod
create mode 100644 vendor/github.com/pelletier/go-toml/go.sum
create mode 100644 vendor/github.com/pelletier/go-toml/keysparsing.go
create mode 100644 vendor/github.com/pelletier/go-toml/lexer.go
create mode 100644 vendor/github.com/pelletier/go-toml/marshal.go
create mode 100644 vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_Map_test.toml
create mode 100644 vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml
create mode 100644 vendor/github.com/pelletier/go-toml/marshal_test.toml
create mode 100644 vendor/github.com/pelletier/go-toml/parser.go
create mode 100644 vendor/github.com/pelletier/go-toml/position.go
create mode 100644 vendor/github.com/pelletier/go-toml/token.go
create mode 100644 vendor/github.com/pelletier/go-toml/toml.go
create mode 100644 vendor/github.com/pelletier/go-toml/tomltree_create.go
create mode 100644 vendor/github.com/pelletier/go-toml/tomltree_write.go
create mode 100644 vendor/github.com/pkg/errors/.gitignore
create mode 100644 vendor/github.com/pkg/errors/.travis.yml
create mode 100644 vendor/github.com/pkg/errors/LICENSE
create mode 100644 vendor/github.com/pkg/errors/README.md
create mode 100644 vendor/github.com/pkg/errors/appveyor.yml
create mode 100644 vendor/github.com/pkg/errors/errors.go
create mode 100644 vendor/github.com/pkg/errors/stack.go
create mode 100644 vendor/github.com/pmezard/go-difflib/LICENSE
create mode 100644 vendor/github.com/pmezard/go-difflib/difflib/difflib.go
create mode 100644 vendor/github.com/siddontang/go/LICENSE
create mode 100644 vendor/github.com/siddontang/go/filelock/LICENSE
create mode 100644 vendor/github.com/siddontang/go/filelock/file_lock_generic.go
create mode 100644 vendor/github.com/siddontang/go/filelock/file_lock_solaris.go
create mode 100644 vendor/github.com/siddontang/go/filelock/file_lock_unix.go
create mode 100644 vendor/github.com/siddontang/go/filelock/file_lock_windows.go
create mode 100644 vendor/github.com/siddontang/go/hack/hack.go
create mode 100644 vendor/github.com/siddontang/go/ioutil2/ioutil.go
create mode 100644 vendor/github.com/siddontang/go/ioutil2/sectionwriter.go
create mode 100644 vendor/github.com/siddontang/go/log/doc.go
create mode 100644 vendor/github.com/siddontang/go/log/filehandler.go
create mode 100644 vendor/github.com/siddontang/go/log/handler.go
create mode 100644 vendor/github.com/siddontang/go/log/log.go
create mode 100644 vendor/github.com/siddontang/go/log/sockethandler.go
create mode 100644 vendor/github.com/siddontang/go/num/bytes.go
create mode 100644 vendor/github.com/siddontang/go/num/cmp.go
create mode 100644 vendor/github.com/siddontang/go/num/str.go
create mode 100644 vendor/github.com/siddontang/go/snappy/LICENSE
create mode 100644 vendor/github.com/siddontang/go/snappy/decode.go
create mode 100644 vendor/github.com/siddontang/go/snappy/encode.go
create mode 100644 vendor/github.com/siddontang/go/snappy/snappy.go
create mode 100644 vendor/github.com/siddontang/go/sync2/atomic.go
create mode 100644 vendor/github.com/siddontang/go/sync2/semaphore.go
create mode 100644 vendor/github.com/siddontang/ledisdb/LICENSE
create mode 100644 vendor/github.com/siddontang/ledisdb/config/config-docker.toml
create mode 100644 vendor/github.com/siddontang/ledisdb/config/config.go
create mode 100644 vendor/github.com/siddontang/ledisdb/config/config.toml
create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/batch.go
create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/const.go
create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/doc.go
create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/dump.go
create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/event.go
create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/ledis.go
create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/ledis_db.go
create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/migrate.go
create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/replication.go
create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/scan.go
create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/sort.go
create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/t_hash.go
create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/t_kv.go
create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/t_list.go
create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/t_set.go
create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/t_ttl.go
create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/t_zset.go
create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/util.go
create mode 100644 vendor/github.com/siddontang/ledisdb/rpl/file_io.go
create mode 100644 vendor/github.com/siddontang/ledisdb/rpl/file_store.go
create mode 100644 vendor/github.com/siddontang/ledisdb/rpl/file_table.go
create mode 100644 vendor/github.com/siddontang/ledisdb/rpl/goleveldb_store.go
create mode 100644 vendor/github.com/siddontang/ledisdb/rpl/log.go
create mode 100644 vendor/github.com/siddontang/ledisdb/rpl/rpl.go
create mode 100644 vendor/github.com/siddontang/ledisdb/rpl/store.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/db.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/driver/driver.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/driver/slice.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/driver/store.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/goleveldb/batch.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/goleveldb/const.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/goleveldb/db.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/goleveldb/iterator.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/goleveldb/snapshot.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/iterator.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/batch.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/cache.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/const.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/db.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/filterpolicy.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/iterator.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/leveldb_ext.cc
create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/leveldb_ext.h
create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/levigo-license
create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/options.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/slice.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/snapshot.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/util.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/batch.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/cache.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/const.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/db.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/env.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/filterpolicy.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/iterator.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/options.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/rocksdb_ext.cc
create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/rocksdb_ext.h
create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/slice.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/snapshot.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/util.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/slice.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/snapshot.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/stat.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/store.go
create mode 100644 vendor/github.com/siddontang/ledisdb/store/writebatch.go
create mode 100644 vendor/github.com/siddontang/rdb/LICENSE
create mode 100644 vendor/github.com/siddontang/rdb/README.md
create mode 100644 vendor/github.com/siddontang/rdb/decode.go
create mode 100644 vendor/github.com/siddontang/rdb/digest.go
create mode 100644 vendor/github.com/siddontang/rdb/encode.go
create mode 100644 vendor/github.com/siddontang/rdb/loader.go
create mode 100644 vendor/github.com/siddontang/rdb/reader.go
create mode 100644 vendor/github.com/siddontang/rdb/wandoujia-license
create mode 100644 vendor/github.com/ssdb/gossdb/LICENSE
create mode 100644 vendor/github.com/ssdb/gossdb/ssdb/ssdb.go
create mode 100644 vendor/github.com/stretchr/testify/LICENSE
create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_format.go
create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_forward.go
create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl
create mode 100644 vendor/github.com/stretchr/testify/assert/assertions.go
create mode 100644 vendor/github.com/stretchr/testify/assert/doc.go
create mode 100644 vendor/github.com/stretchr/testify/assert/errors.go
create mode 100644 vendor/github.com/stretchr/testify/assert/forward_assertions.go
create mode 100644 vendor/github.com/stretchr/testify/assert/http_assertions.go
create mode 100644 vendor/github.com/stretchr/testify/require/doc.go
create mode 100644 vendor/github.com/stretchr/testify/require/forward_requirements.go
create mode 100644 vendor/github.com/stretchr/testify/require/require.go
create mode 100644 vendor/github.com/stretchr/testify/require/require.go.tmpl
create mode 100644 vendor/github.com/stretchr/testify/require/require_forward.go
create mode 100644 vendor/github.com/stretchr/testify/require/require_forward.go.tmpl
create mode 100644 vendor/github.com/stretchr/testify/require/requirements.go
create mode 100644 vendor/github.com/syndtr/goleveldb/LICENSE
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/batch.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/comparer.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db_state.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db_util.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db_write.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/doc.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/errors.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/filter.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/key.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/options.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/session.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/session_record.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/session_util.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/table.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/table/table.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util/range.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util/util.go
create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/version.go
delete mode 100644 vendor/golang.org/x/crypto/.gitattributes
delete mode 100644 vendor/golang.org/x/crypto/.gitignore
delete mode 100644 vendor/golang.org/x/crypto/CONTRIBUTING.md
delete mode 100644 vendor/golang.org/x/crypto/README
create mode 100644 vendor/golang.org/x/crypto/acme/autocert/listener.go
create mode 100644 vendor/golang.org/x/crypto/acme/http.go
create mode 100644 vendor/golang.org/x/crypto/acme/version_go112.go
delete mode 100644 vendor/golang.org/x/crypto/bcrypt/base64.go
delete mode 100644 vendor/golang.org/x/crypto/bcrypt/bcrypt.go
delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b.go
delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go
delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go
delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_generic.go
delete mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s.go
delete mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s_386.go
delete mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s_386.s
delete mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s_amd64.go
delete mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s_generic.go
delete mode 100644 vendor/golang.org/x/crypto/blake2s/blake2s_ref.go
delete mode 100644 vendor/golang.org/x/crypto/blowfish/block.go
delete mode 100644 vendor/golang.org/x/crypto/blowfish/cipher.go
delete mode 100644 vendor/golang.org/x/crypto/blowfish/const.go
delete mode 100644 vendor/golang.org/x/crypto/bn256/bn256.go
delete mode 100644 vendor/golang.org/x/crypto/bn256/constants.go
delete mode 100644 vendor/golang.org/x/crypto/bn256/curve.go
delete mode 100644 vendor/golang.org/x/crypto/bn256/gfp12.go
delete mode 100644 vendor/golang.org/x/crypto/bn256/gfp2.go
delete mode 100644 vendor/golang.org/x/crypto/bn256/gfp6.go
delete mode 100644 vendor/golang.org/x/crypto/bn256/optate.go
delete mode 100644 vendor/golang.org/x/crypto/bn256/twist.go
delete mode 100644 vendor/golang.org/x/crypto/cast5/cast5.go
delete mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go
delete mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go
delete mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go
delete mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go
delete mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/internal/chacha20/chacha_generic.go
delete mode 100644 vendor/golang.org/x/crypto/codereview.cfg
delete mode 100644 vendor/golang.org/x/crypto/curve25519/const_amd64.h
delete mode 100644 vendor/golang.org/x/crypto/curve25519/const_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/curve25519/cswap_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519.go
delete mode 100644 vendor/golang.org/x/crypto/curve25519/doc.go
delete mode 100644 vendor/golang.org/x/crypto/curve25519/freeze_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go
delete mode 100644 vendor/golang.org/x/crypto/curve25519/mul_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/curve25519/square_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519.go
delete mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go
delete mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
delete mode 100644 vendor/golang.org/x/crypto/ed25519/testdata/sign.input.gz
delete mode 100644 vendor/golang.org/x/crypto/hkdf/hkdf.go
delete mode 100644 vendor/golang.org/x/crypto/md4/md4.go
delete mode 100644 vendor/golang.org/x/crypto/md4/md4block.go
delete mode 100644 vendor/golang.org/x/crypto/nacl/box/box.go
delete mode 100644 vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
delete mode 100644 vendor/golang.org/x/crypto/ocsp/ocsp.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/armor/armor.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/armor/encode.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/canonical_text.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/errors/errors.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/keys.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/compressed.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/config.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/literal.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/ocfb.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/opaque.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/packet.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/private_key.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/reader.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userid.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/read.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/write.go
delete mode 100644 vendor/golang.org/x/crypto/otr/libotr_test_helper.c
delete mode 100644 vendor/golang.org/x/crypto/otr/otr.go
delete mode 100644 vendor/golang.org/x/crypto/otr/smp.go
delete mode 100644 vendor/golang.org/x/crypto/pkcs12/bmp-string.go
delete mode 100644 vendor/golang.org/x/crypto/pkcs12/crypto.go
delete mode 100644 vendor/golang.org/x/crypto/pkcs12/errors.go
delete mode 100644 vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go
delete mode 100644 vendor/golang.org/x/crypto/pkcs12/mac.go
delete mode 100644 vendor/golang.org/x/crypto/pkcs12/pbkdf.go
delete mode 100644 vendor/golang.org/x/crypto/pkcs12/pkcs12.go
delete mode 100644 vendor/golang.org/x/crypto/pkcs12/safebags.go
delete mode 100644 vendor/golang.org/x/crypto/poly1305/poly1305.go
delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_amd64.go
delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_arm.go
delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_arm.s
delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_ref.go
delete mode 100644 vendor/golang.org/x/crypto/ripemd160/ripemd160.go
delete mode 100644 vendor/golang.org/x/crypto/ripemd160/ripemd160block.go
delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go
delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go
delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
delete mode 100644 vendor/golang.org/x/crypto/salsa20/salsa20.go
delete mode 100644 vendor/golang.org/x/crypto/scrypt/scrypt.go
delete mode 100644 vendor/golang.org/x/crypto/sha3/doc.go
delete mode 100644 vendor/golang.org/x/crypto/sha3/hashes.go
delete mode 100644 vendor/golang.org/x/crypto/sha3/keccakf.go
delete mode 100644 vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
delete mode 100644 vendor/golang.org/x/crypto/sha3/register.go
delete mode 100644 vendor/golang.org/x/crypto/sha3/sha3.go
delete mode 100644 vendor/golang.org/x/crypto/sha3/shake.go
delete mode 100644 vendor/golang.org/x/crypto/sha3/testdata/keccakKats.json.deflate
delete mode 100644 vendor/golang.org/x/crypto/sha3/xor.go
delete mode 100644 vendor/golang.org/x/crypto/sha3/xor_generic.go
delete mode 100644 vendor/golang.org/x/crypto/sha3/xor_unaligned.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/agent/client.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/agent/forward.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/agent/keyring.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/agent/server.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/buffer.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/certs.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/channel.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/cipher.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/client.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/client_auth.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/common.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/connection.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/doc.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/handshake.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/kex.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/keys.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/mac.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/messages.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/mux.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/server.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/session.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/tcpip.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/terminal.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_linux.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/test/doc.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/testdata/doc.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/testdata/keys.go
delete mode 100644 vendor/golang.org/x/crypto/ssh/transport.go
delete mode 100644 vendor/golang.org/x/crypto/tea/cipher.go
delete mode 100644 vendor/golang.org/x/crypto/twofish/twofish.go
delete mode 100644 vendor/golang.org/x/crypto/xtea/block.go
delete mode 100644 vendor/golang.org/x/crypto/xtea/cipher.go
delete mode 100644 vendor/golang.org/x/crypto/xts/xts.go
rename vendor/golang.org/x/{time => net}/AUTHORS (100%)
rename vendor/golang.org/x/{time => net}/CONTRIBUTORS (100%)
rename vendor/golang.org/x/{time => net}/LICENSE (100%)
rename vendor/golang.org/x/{time => net}/PATENTS (100%)
delete mode 100644 vendor/golang.org/x/net/context/ctxhttp/cancelreq.go
delete mode 100644 vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go
delete mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
create mode 100644 vendor/golang.org/x/net/context/go17.go
create mode 100644 vendor/golang.org/x/net/context/go19.go
create mode 100644 vendor/golang.org/x/net/context/pre_go17.go
create mode 100644 vendor/golang.org/x/net/context/pre_go19.go
create mode 100644 vendor/golang.org/x/net/html/atom/atom.go
create mode 100644 vendor/golang.org/x/net/html/atom/gen.go
create mode 100644 vendor/golang.org/x/net/html/atom/table.go
create mode 100644 vendor/golang.org/x/net/html/charset/charset.go
create mode 100644 vendor/golang.org/x/net/html/const.go
create mode 100644 vendor/golang.org/x/net/html/doc.go
create mode 100644 vendor/golang.org/x/net/html/doctype.go
create mode 100644 vendor/golang.org/x/net/html/entity.go
create mode 100644 vendor/golang.org/x/net/html/escape.go
create mode 100644 vendor/golang.org/x/net/html/foreign.go
create mode 100644 vendor/golang.org/x/net/html/node.go
create mode 100644 vendor/golang.org/x/net/html/parse.go
create mode 100644 vendor/golang.org/x/net/html/render.go
create mode 100644 vendor/golang.org/x/net/html/token.go
create mode 100644 vendor/golang.org/x/net/idna/idna10.0.0.go
create mode 100644 vendor/golang.org/x/net/idna/idna9.0.0.go
create mode 100644 vendor/golang.org/x/net/idna/punycode.go
create mode 100644 vendor/golang.org/x/net/idna/tables10.0.0.go
create mode 100644 vendor/golang.org/x/net/idna/tables11.0.0.go
create mode 100644 vendor/golang.org/x/net/idna/tables9.0.0.go
create mode 100644 vendor/golang.org/x/net/idna/trie.go
create mode 100644 vendor/golang.org/x/net/idna/trieval.go
create mode 100644 vendor/golang.org/x/sys/AUTHORS
create mode 100644 vendor/golang.org/x/sys/CONTRIBUTORS
create mode 100644 vendor/golang.org/x/sys/LICENSE
create mode 100644 vendor/golang.org/x/sys/PATENTS
create mode 100644 vendor/golang.org/x/sys/unix/.gitignore
create mode 100644 vendor/golang.org/x/sys/unix/README.md
create mode 100644 vendor/golang.org/x/sys/unix/affinity_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/aliases.go
create mode 100644 vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_386.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_amd64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_386.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_arm.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_386.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_amd64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_s390x.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_386.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_arm.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_386.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_arm.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
create mode 100644 vendor/golang.org/x/sys/unix/bluetooth_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/cap_freebsd.go
rename vendor/golang.org/x/{crypto/sha3/keccakf_amd64.go => sys/unix/constants.go} (52%)
create mode 100644 vendor/golang.org/x/sys/unix/dev_aix_ppc.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_darwin.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_dragonfly.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_freebsd.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_netbsd.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_openbsd.go
create mode 100644 vendor/golang.org/x/sys/unix/dirent.go
create mode 100644 vendor/golang.org/x/sys/unix/endian_big.go
create mode 100644 vendor/golang.org/x/sys/unix/endian_little.go
create mode 100644 vendor/golang.org/x/sys/unix/env_unix.go
create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/fcntl.go
create mode 100644 vendor/golang.org/x/sys/unix/fcntl_darwin.go
create mode 100644 vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
create mode 100644 vendor/golang.org/x/sys/unix/gccgo.go
create mode 100644 vendor/golang.org/x/sys/unix/gccgo_c.c
create mode 100644 vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ioctl.go
create mode 100644 vendor/golang.org/x/sys/unix/mkall.sh
create mode 100644 vendor/golang.org/x/sys/unix/mkasm_darwin.go
create mode 100644 vendor/golang.org/x/sys/unix/mkerrors.sh
create mode 100644 vendor/golang.org/x/sys/unix/mkpost.go
create mode 100644 vendor/golang.org/x/sys/unix/mksyscall.go
create mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go
create mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/mksyscall_solaris.go
create mode 100644 vendor/golang.org/x/sys/unix/mksysctl_openbsd.go
create mode 100644 vendor/golang.org/x/sys/unix/mksysnum.go
create mode 100644 vendor/golang.org/x/sys/unix/pagesize_unix.go
create mode 100644 vendor/golang.org/x/sys/unix/pledge_openbsd.go
create mode 100644 vendor/golang.org/x/sys/unix/race.go
create mode 100644 vendor/golang.org/x/sys/unix/race0.go
create mode 100644 vendor/golang.org/x/sys/unix/readdirent_getdents.go
create mode 100644 vendor/golang.org/x/sys/unix/readdirent_getdirentries.go
create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix.go
create mode 100644 vendor/golang.org/x/sys/unix/str.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_aix.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_aix_ppc.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_bsd.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_386.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_386.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go
create mode 100644 vendor/golang.org/x/sys/unix/timestruct.go
create mode 100644 vendor/golang.org/x/sys/unix/types_aix.go
create mode 100644 vendor/golang.org/x/sys/unix/types_darwin.go
create mode 100644 vendor/golang.org/x/sys/unix/types_dragonfly.go
create mode 100644 vendor/golang.org/x/sys/unix/types_freebsd.go
create mode 100644 vendor/golang.org/x/sys/unix/types_netbsd.go
create mode 100644 vendor/golang.org/x/sys/unix/types_openbsd.go
create mode 100644 vendor/golang.org/x/sys/unix/types_solaris.go
create mode 100644 vendor/golang.org/x/sys/unix/unveil_openbsd.go
create mode 100644 vendor/golang.org/x/sys/unix/xattr_bsd.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zptrace386_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/zptracearm_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/zptracemips_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/zptracemipsle_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_386.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_386.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
create mode 100644 vendor/golang.org/x/sys/windows/aliases.go
create mode 100644 vendor/golang.org/x/sys/windows/asm_windows_386.s
create mode 100644 vendor/golang.org/x/sys/windows/asm_windows_amd64.s
create mode 100644 vendor/golang.org/x/sys/windows/asm_windows_arm.s
create mode 100644 vendor/golang.org/x/sys/windows/dll_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/env_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/eventlog.go
create mode 100644 vendor/golang.org/x/sys/windows/exec_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/memory_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/mkerrors.bash
create mode 100644 vendor/golang.org/x/sys/windows/mkknownfolderids.bash
create mode 100644 vendor/golang.org/x/sys/windows/mksyscall.go
create mode 100644 vendor/golang.org/x/sys/windows/race.go
create mode 100644 vendor/golang.org/x/sys/windows/race0.go
create mode 100644 vendor/golang.org/x/sys/windows/security_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/service.go
create mode 100644 vendor/golang.org/x/sys/windows/str.go
create mode 100644 vendor/golang.org/x/sys/windows/syscall.go
create mode 100644 vendor/golang.org/x/sys/windows/syscall_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/types_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/types_windows_386.go
create mode 100644 vendor/golang.org/x/sys/windows/types_windows_amd64.go
create mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm.go
create mode 100644 vendor/golang.org/x/sys/windows/zerrors_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/zknownfolderids_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/zsyscall_windows.go
create mode 100644 vendor/golang.org/x/text/AUTHORS
create mode 100644 vendor/golang.org/x/text/CONTRIBUTORS
create mode 100644 vendor/golang.org/x/text/LICENSE
create mode 100644 vendor/golang.org/x/text/PATENTS
create mode 100644 vendor/golang.org/x/text/encoding/charmap/charmap.go
create mode 100644 vendor/golang.org/x/text/encoding/charmap/maketables.go
create mode 100644 vendor/golang.org/x/text/encoding/charmap/tables.go
create mode 100644 vendor/golang.org/x/text/encoding/encoding.go
create mode 100644 vendor/golang.org/x/text/encoding/htmlindex/gen.go
create mode 100644 vendor/golang.org/x/text/encoding/htmlindex/htmlindex.go
create mode 100644 vendor/golang.org/x/text/encoding/htmlindex/map.go
create mode 100644 vendor/golang.org/x/text/encoding/htmlindex/tables.go
create mode 100644 vendor/golang.org/x/text/encoding/internal/identifier/gen.go
create mode 100644 vendor/golang.org/x/text/encoding/internal/identifier/identifier.go
create mode 100644 vendor/golang.org/x/text/encoding/internal/identifier/mib.go
create mode 100644 vendor/golang.org/x/text/encoding/internal/internal.go
create mode 100644 vendor/golang.org/x/text/encoding/japanese/all.go
create mode 100644 vendor/golang.org/x/text/encoding/japanese/eucjp.go
create mode 100644 vendor/golang.org/x/text/encoding/japanese/iso2022jp.go
create mode 100644 vendor/golang.org/x/text/encoding/japanese/maketables.go
create mode 100644 vendor/golang.org/x/text/encoding/japanese/shiftjis.go
create mode 100644 vendor/golang.org/x/text/encoding/japanese/tables.go
create mode 100644 vendor/golang.org/x/text/encoding/korean/euckr.go
create mode 100644 vendor/golang.org/x/text/encoding/korean/maketables.go
create mode 100644 vendor/golang.org/x/text/encoding/korean/tables.go
create mode 100644 vendor/golang.org/x/text/encoding/simplifiedchinese/all.go
create mode 100644 vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go
create mode 100644 vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go
create mode 100644 vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go
create mode 100644 vendor/golang.org/x/text/encoding/simplifiedchinese/tables.go
create mode 100644 vendor/golang.org/x/text/encoding/traditionalchinese/big5.go
create mode 100644 vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go
create mode 100644 vendor/golang.org/x/text/encoding/traditionalchinese/tables.go
create mode 100644 vendor/golang.org/x/text/encoding/unicode/override.go
create mode 100644 vendor/golang.org/x/text/encoding/unicode/unicode.go
create mode 100644 vendor/golang.org/x/text/internal/language/common.go
create mode 100644 vendor/golang.org/x/text/internal/language/compact.go
create mode 100644 vendor/golang.org/x/text/internal/language/compact/compact.go
create mode 100644 vendor/golang.org/x/text/internal/language/compact/gen.go
create mode 100644 vendor/golang.org/x/text/internal/language/compact/gen_index.go
create mode 100644 vendor/golang.org/x/text/internal/language/compact/gen_parents.go
create mode 100644 vendor/golang.org/x/text/internal/language/compact/language.go
create mode 100644 vendor/golang.org/x/text/internal/language/compact/parents.go
create mode 100644 vendor/golang.org/x/text/internal/language/compact/tables.go
create mode 100644 vendor/golang.org/x/text/internal/language/compact/tags.go
create mode 100644 vendor/golang.org/x/text/internal/language/compose.go
create mode 100644 vendor/golang.org/x/text/internal/language/coverage.go
create mode 100644 vendor/golang.org/x/text/internal/language/gen.go
create mode 100644 vendor/golang.org/x/text/internal/language/gen_common.go
create mode 100644 vendor/golang.org/x/text/internal/language/language.go
create mode 100644 vendor/golang.org/x/text/internal/language/lookup.go
create mode 100644 vendor/golang.org/x/text/internal/language/match.go
create mode 100644 vendor/golang.org/x/text/internal/language/parse.go
create mode 100644 vendor/golang.org/x/text/internal/language/tables.go
create mode 100644 vendor/golang.org/x/text/internal/language/tags.go
create mode 100644 vendor/golang.org/x/text/internal/tag/tag.go
create mode 100644 vendor/golang.org/x/text/internal/utf8internal/utf8internal.go
create mode 100644 vendor/golang.org/x/text/language/coverage.go
create mode 100644 vendor/golang.org/x/text/language/doc.go
create mode 100644 vendor/golang.org/x/text/language/gen.go
create mode 100644 vendor/golang.org/x/text/language/go1_1.go
create mode 100644 vendor/golang.org/x/text/language/go1_2.go
create mode 100644 vendor/golang.org/x/text/language/language.go
create mode 100644 vendor/golang.org/x/text/language/match.go
create mode 100644 vendor/golang.org/x/text/language/parse.go
create mode 100644 vendor/golang.org/x/text/language/tables.go
create mode 100644 vendor/golang.org/x/text/language/tags.go
create mode 100644 vendor/golang.org/x/text/runes/cond.go
create mode 100644 vendor/golang.org/x/text/runes/runes.go
create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule.go
rename vendor/golang.org/x/{crypto/blake2b/blake2b_ref.go => text/secure/bidirule/bidirule10.0.0.go} (50%)
create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go
create mode 100644 vendor/golang.org/x/text/transform/transform.go
create mode 100644 vendor/golang.org/x/text/unicode/bidi/bidi.go
create mode 100644 vendor/golang.org/x/text/unicode/bidi/bracket.go
create mode 100644 vendor/golang.org/x/text/unicode/bidi/core.go
create mode 100644 vendor/golang.org/x/text/unicode/bidi/gen.go
create mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_ranges.go
create mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_trieval.go
create mode 100644 vendor/golang.org/x/text/unicode/bidi/prop.go
create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go
create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go
create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go
create mode 100644 vendor/golang.org/x/text/unicode/bidi/trieval.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/composition.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/forminfo.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/input.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/iter.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/maketables.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/normalize.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/readwriter.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/tables10.0.0.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/tables11.0.0.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/tables9.0.0.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/transform.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/trie.go
create mode 100644 vendor/golang.org/x/text/unicode/norm/triegen.go
delete mode 100644 vendor/golang.org/x/time/CONTRIBUTING.md
delete mode 100644 vendor/golang.org/x/time/README
delete mode 100644 vendor/golang.org/x/time/rate/rate.go
create mode 100644 vendor/google.golang.org/appengine/LICENSE
create mode 100644 vendor/google.golang.org/appengine/cloudsql/cloudsql.go
create mode 100644 vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go
create mode 100644 vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go
create mode 100644 vendor/gopkg.in/check.v1/.gitignore
create mode 100644 vendor/gopkg.in/check.v1/.travis.yml
create mode 100644 vendor/gopkg.in/check.v1/LICENSE
create mode 100644 vendor/gopkg.in/check.v1/README.md
create mode 100644 vendor/gopkg.in/check.v1/TODO
create mode 100644 vendor/gopkg.in/check.v1/benchmark.go
create mode 100644 vendor/gopkg.in/check.v1/check.go
create mode 100644 vendor/gopkg.in/check.v1/checkers.go
create mode 100644 vendor/gopkg.in/check.v1/helpers.go
create mode 100644 vendor/gopkg.in/check.v1/printer.go
create mode 100644 vendor/gopkg.in/check.v1/reporter.go
create mode 100644 vendor/gopkg.in/check.v1/run.go
create mode 100644 vendor/gopkg.in/dgrijalva/jwt-go.v3/.gitignore
create mode 100644 vendor/gopkg.in/dgrijalva/jwt-go.v3/.travis.yml
create mode 100644 vendor/gopkg.in/dgrijalva/jwt-go.v3/LICENSE
create mode 100644 vendor/gopkg.in/dgrijalva/jwt-go.v3/MIGRATION_GUIDE.md
create mode 100644 vendor/gopkg.in/dgrijalva/jwt-go.v3/README.md
create mode 100644 vendor/gopkg.in/dgrijalva/jwt-go.v3/VERSION_HISTORY.md
create mode 100644 vendor/gopkg.in/dgrijalva/jwt-go.v3/claims.go
create mode 100644 vendor/gopkg.in/dgrijalva/jwt-go.v3/doc.go
create mode 100644 vendor/gopkg.in/dgrijalva/jwt-go.v3/ecdsa.go
create mode 100644 vendor/gopkg.in/dgrijalva/jwt-go.v3/ecdsa_utils.go
create mode 100644 vendor/gopkg.in/dgrijalva/jwt-go.v3/errors.go
create mode 100644 vendor/gopkg.in/dgrijalva/jwt-go.v3/hmac.go
create mode 100644 vendor/gopkg.in/dgrijalva/jwt-go.v3/map_claims.go
create mode 100644 vendor/gopkg.in/dgrijalva/jwt-go.v3/none.go
create mode 100644 vendor/gopkg.in/dgrijalva/jwt-go.v3/parser.go
create mode 100644 vendor/gopkg.in/dgrijalva/jwt-go.v3/rsa.go
create mode 100644 vendor/gopkg.in/dgrijalva/jwt-go.v3/rsa_pss.go
create mode 100644 vendor/gopkg.in/dgrijalva/jwt-go.v3/rsa_utils.go
create mode 100644 vendor/gopkg.in/dgrijalva/jwt-go.v3/signing_method.go
create mode 100644 vendor/gopkg.in/dgrijalva/jwt-go.v3/token.go
create mode 100644 vendor/modules.txt
delete mode 100644 vendor/vendor.json
create mode 100644 vendor/xorm.io/builder/.drone.yml
create mode 100644 vendor/xorm.io/builder/LICENSE
create mode 100644 vendor/xorm.io/builder/README.md
create mode 100644 vendor/xorm.io/builder/builder.go
create mode 100644 vendor/xorm.io/builder/builder_delete.go
create mode 100644 vendor/xorm.io/builder/builder_insert.go
create mode 100644 vendor/xorm.io/builder/builder_limit.go
create mode 100644 vendor/xorm.io/builder/builder_select.go
create mode 100644 vendor/xorm.io/builder/builder_union.go
create mode 100644 vendor/xorm.io/builder/builder_update.go
create mode 100644 vendor/xorm.io/builder/cond.go
create mode 100644 vendor/xorm.io/builder/cond_and.go
create mode 100644 vendor/xorm.io/builder/cond_between.go
create mode 100644 vendor/xorm.io/builder/cond_compare.go
create mode 100644 vendor/xorm.io/builder/cond_eq.go
create mode 100644 vendor/xorm.io/builder/cond_expr.go
create mode 100644 vendor/xorm.io/builder/cond_if.go
create mode 100644 vendor/xorm.io/builder/cond_in.go
create mode 100644 vendor/xorm.io/builder/cond_like.go
create mode 100644 vendor/xorm.io/builder/cond_neq.go
create mode 100644 vendor/xorm.io/builder/cond_not.go
create mode 100644 vendor/xorm.io/builder/cond_notin.go
create mode 100644 vendor/xorm.io/builder/cond_null.go
create mode 100644 vendor/xorm.io/builder/cond_or.go
create mode 100644 vendor/xorm.io/builder/doc.go
create mode 100644 vendor/xorm.io/builder/error.go
create mode 100644 vendor/xorm.io/builder/go.mod
create mode 100644 vendor/xorm.io/builder/go.sum
create mode 100644 vendor/xorm.io/builder/sql.go
create mode 100644 vendor/xorm.io/builder/string_builder.go
create mode 100644 vendor/xorm.io/core/.drone.yml
create mode 100644 vendor/xorm.io/core/.gitignore
create mode 100644 vendor/xorm.io/core/LICENSE
create mode 100644 vendor/xorm.io/core/README.md
create mode 100644 vendor/xorm.io/core/benchmark.sh
create mode 100644 vendor/xorm.io/core/cache.go
create mode 100644 vendor/xorm.io/core/column.go
create mode 100644 vendor/xorm.io/core/converstion.go
create mode 100644 vendor/xorm.io/core/db.go
create mode 100644 vendor/xorm.io/core/dialect.go
create mode 100644 vendor/xorm.io/core/driver.go
create mode 100644 vendor/xorm.io/core/error.go
create mode 100644 vendor/xorm.io/core/filter.go
create mode 100644 vendor/xorm.io/core/go.mod
create mode 100644 vendor/xorm.io/core/go.sum
create mode 100644 vendor/xorm.io/core/ilogger.go
create mode 100644 vendor/xorm.io/core/index.go
create mode 100644 vendor/xorm.io/core/mapper.go
create mode 100644 vendor/xorm.io/core/pk.go
create mode 100644 vendor/xorm.io/core/rows.go
create mode 100644 vendor/xorm.io/core/scan.go
create mode 100644 vendor/xorm.io/core/stmt.go
create mode 100644 vendor/xorm.io/core/table.go
create mode 100644 vendor/xorm.io/core/tx.go
create mode 100644 vendor/xorm.io/core/type.go
diff --git a/ext/db/directsql/sqlengine.go b/ext/db/directsql/sqlengine.go
index 5e3aa84..ebcdd3d 100644
--- a/ext/db/directsql/sqlengine.go
+++ b/ext/db/directsql/sqlengine.go
@@ -19,7 +19,7 @@ import (
"errors"
"fmt"
- "github.com/go-xorm/core"
+ "xorm.io/core"
"github.com/henrylee2cn/faygo"
)
diff --git a/ext/db/directsql/sqlhelper.go b/ext/db/directsql/sqlhelper.go
index d88e77a..3754ac0 100644
--- a/ext/db/directsql/sqlhelper.go
+++ b/ext/db/directsql/sqlhelper.go
@@ -11,7 +11,7 @@ import (
"strconv"
"time"
- "github.com/go-xorm/core"
+ "xorm.io/core"
)
//-------解析参数的函数------------
diff --git a/ext/db/directsql/sqlmanage.go b/ext/db/directsql/sqlmanage.go
index cd4dcec..8339ec0 100644
--- a/ext/db/directsql/sqlmanage.go
+++ b/ext/db/directsql/sqlmanage.go
@@ -30,7 +30,7 @@ import (
"sync"
"github.com/fsnotify/fsnotify"
- "github.com/go-xorm/core"
+ "xorm.io/core"
"github.com/henrylee2cn/faygo"
faygoxorm "github.com/henrylee2cn/faygo/ext/db/xorm"
confpkg "github.com/henrylee2cn/ini"
diff --git a/ext/db/directsql/sqlservice.go b/ext/db/directsql/sqlservice.go
index e191445..557dc93 100644
--- a/ext/db/directsql/sqlservice.go
+++ b/ext/db/directsql/sqlservice.go
@@ -19,7 +19,7 @@ import (
"errors"
"reflect"
- "github.com/go-xorm/core"
+ "xorm.io/core"
"github.com/henrylee2cn/faygo"
)
diff --git a/ext/db/xorm/logger.go b/ext/db/xorm/logger.go
index 1fa093a..30e9190 100644
--- a/ext/db/xorm/logger.go
+++ b/ext/db/xorm/logger.go
@@ -1,7 +1,7 @@
package xorm
import (
- "github.com/go-xorm/core"
+ "xorm.io/core"
"github.com/henrylee2cn/faygo"
"github.com/henrylee2cn/faygo/logging"
diff --git a/ext/db/xorm/service.go b/ext/db/xorm/service.go
index 317c9bb..faa07ad 100644
--- a/ext/db/xorm/service.go
+++ b/ext/db/xorm/service.go
@@ -5,7 +5,7 @@ import (
"path/filepath"
"strings"
- "github.com/go-xorm/core"
+ "xorm.io/core"
"github.com/go-xorm/xorm"
// _ "github.com/denisenkom/go-mssqldb" //mssql
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..5053112
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,43 @@
+module github.com/henrylee2cn/faygo
+
+go 1.12
+
+require (
+ github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668
+ github.com/couchbase/go-couchbase v0.0.0-20190808141609-0a5dfbe71f2f
+ github.com/couchbase/gomemcached v0.0.0-20190515232915-c4b4ca0eb21d // indirect
+ github.com/couchbase/goutils v0.0.0-20190315194238-f9d42b11473b // indirect
+ github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76 // indirect
+ github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
+ github.com/edsrzf/mmap-go v1.0.0 // indirect
+ github.com/elazarl/go-bindata-assetfs v1.0.0
+ github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51
+ github.com/facebookgo/freeport v0.0.0-20150612182905-d4adf43b75b9
+ github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
+ github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect
+ github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4
+ github.com/fsnotify/fsnotify v1.4.7
+ github.com/garyburd/redigo v1.6.0
+ github.com/go-sql-driver/mysql v1.4.1
+ github.com/go-xorm/xorm v0.7.6
+ github.com/gorilla/websocket v1.4.0
+ github.com/henrylee2cn/goutil v0.0.0-20190807075143-e8afa09140e9
+ github.com/henrylee2cn/ini v1.29.0
+ github.com/jinzhu/gorm v1.9.10
+ github.com/jmoiron/sqlx v1.2.0
+ github.com/json-iterator/go v1.1.7
+ github.com/lib/pq v1.2.0
+ github.com/pelletier/go-toml v1.4.0 // indirect
+ github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 // indirect
+ github.com/siddontang/ledisdb v0.0.0-20190202134119-8ceb77e66a92
+ github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d // indirect
+ github.com/ssdb/gossdb v0.0.0-20180723034631-88f6b59b84ec
+ github.com/stretchr/testify v1.3.0
+ github.com/syndtr/goleveldb v1.0.0 // indirect
+ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
+ golang.org/x/net v0.0.0-20190724013045-ca1201d0de80
+ golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa
+ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127
+ gopkg.in/dgrijalva/jwt-go.v3 v3.2.0
+ xorm.io/core v0.7.0
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..fc2b1c3
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,266 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU=
+cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
+github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 h1:U/lr3Dgy4WK+hNk4tyD+nuGjpVLPEHuJSFXMw11/HPA=
+github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
+github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
+github.com/couchbase/go-couchbase v0.0.0-20190808141609-0a5dfbe71f2f h1:5cEzA2Th0dTK0SgzU4hl1sy904maoZo13GTtK5WBB9o=
+github.com/couchbase/go-couchbase v0.0.0-20190808141609-0a5dfbe71f2f/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U=
+github.com/couchbase/gomemcached v0.0.0-20190515232915-c4b4ca0eb21d h1:XMf4E1U+b9E3ElF0mjvfXZdflBRZz4gLp16nQ/QSHQM=
+github.com/couchbase/gomemcached v0.0.0-20190515232915-c4b4ca0eb21d/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c=
+github.com/couchbase/goutils v0.0.0-20190315194238-f9d42b11473b h1:bZ9rKU2/V8sY+NulSfxDOnXTWcs1rySqdF1sVepihvo=
+github.com/couchbase/goutils v0.0.0-20190315194238-f9d42b11473b/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
+github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76 h1:Lgdd/Qp96Qj8jqLpq2cI1I1X7BJnu06efS+XkhRoLUQ=
+github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76/go.mod h1:vYwsqCOLxGiisLwp9rITslkFNpZD5rz43tf41QFkTWY=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
+github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4 h1:YcpmyvADGYw5LqMnHqSkyIELsHCGF6PkrmM31V8rF7o=
+github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
+github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk=
+github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
+github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
+github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ=
+github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
+github.com/facebookgo/freeport v0.0.0-20150612182905-d4adf43b75b9 h1:wWke/RUCl7VRjQhwPlR/v0glZXNYzBHdNUzf/Am2Nmg=
+github.com/facebookgo/freeport v0.0.0-20150612182905-d4adf43b75b9/go.mod h1:uPmAp6Sws4L7+Q/OokbWDAK1ibXYhB3PXFP1kol5hPg=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
+github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 h1:E2s37DuLxFhQDg5gKsWoLBOB0n+ZW8s599zru8FJ2/Y=
+github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
+github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4 h1:GY1+t5Dr9OKADM64SYnQjw/w99HMYvQ0A8/JoUkxVmc=
+github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA=
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/garyburd/redigo v1.6.0 h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc=
+github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI=
+github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
+github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:9wScpmSP5A3Bk8V3XHWUcJmYTh+ZnlHVyc+A4oZYS3Y=
+github.com/go-xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:56xuuqnHyryaerycW3BfssRdxQstACi0Epw/yC5E2xM=
+github.com/go-xorm/xorm v0.7.6 h1:qFbuobVfAYzMlf9C8hrLnp4B17VUEIH0eZuZ0IfXWjo=
+github.com/go-xorm/xorm v0.7.6/go.mod h1:nqz2TAsuOHWH2yk4FYWtacCGgdbrcdZ5mF1XadqEHls=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/henrylee2cn/goutil v0.0.0-20190807075143-e8afa09140e9 h1:a2H8nokxD0y7RtMAD0c0iyRjIc6PPBopF2/8zw5kFiE=
+github.com/henrylee2cn/goutil v0.0.0-20190807075143-e8afa09140e9/go.mod h1:I9qYeMYwdKC7UFXMECNzCEv0fYuolqLeBMqsmeG7IVo=
+github.com/henrylee2cn/ini v1.29.0 h1:qlvyhAtpeRDG7qlBEdCQFzElyBjCgAFJSxEVnFFEgHc=
+github.com/henrylee2cn/ini v1.29.0/go.mod h1:ucAh/Gt/gt+AIbD75B7tfRz4w9QzAHlmvDzXrWGonYE=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=
+github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
+github.com/jackc/pgx v3.3.0+incompatible h1:Wa90/+qsITBAPkAZjiByeIGHFcj3Ztu+VzrrIpHjL90=
+github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
+github.com/jinzhu/gorm v1.9.10 h1:HvrsqdhCW78xpJF67g1hMxS6eCToo9PZH4LDB8WKPac=
+github.com/jinzhu/gorm v1.9.10/go.mod h1:Kh6hTsSGffh4ui079FHrR5Gg+5D0hgihqDcsDN2BBJY=
+github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
+github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
+github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
+github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA=
+github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
+github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5 h1:rhqTjzJlm7EbkELJDKMTU7udov+Se0xZkWmugr6zGok=
+github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
+github.com/juju/loggo v0.0.0-20180524022052-584905176618 h1:MK144iBQF9hTSwBW/9eJm034bVoG30IshVm688T2hi8=
+github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
+github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073 h1:WQM1NildKThwdP7qWrNAFGzp4ijNLw8RlgENkaI4MJs=
+github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
+github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg=
+github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
+github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
+github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 h1:xT+JlYxNGqyT+XcU8iUrN18JYed2TvG9yN5ULG2jATM=
+github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
+github.com/siddontang/ledisdb v0.0.0-20190202134119-8ceb77e66a92 h1:qvsJwGToa8rxb42cDRhkbKeX2H5N8BH+s2aUikGt8mI=
+github.com/siddontang/ledisdb v0.0.0-20190202134119-8ceb77e66a92/go.mod h1:mF1DpOSOUiJRMR+FDqaqu3EBqrybQtrDDszLUZ6oxPg=
+github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d h1:NVwnfyR3rENtlz62bcrkXME3INVUa4lcdGt+opvxExs=
+github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d/go.mod h1:AMEsy7v5z92TR1JKMkLLoaOQk++LVnOKL3ScbJ8GNGA=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/ssdb/gossdb v0.0.0-20180723034631-88f6b59b84ec h1:q6XVwXmKvCRHRqesF3cSv6lNqqHi0QWOvgDlSohg8UA=
+github.com/ssdb/gossdb v0.0.0-20180723034631-88f6b59b84ec/go.mod h1:QBvMkMya+gXctz3kmljlUCu/yB3GZ6oee+dUozsezQE=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
+github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
+github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
+github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
+go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa h1:KIDDMLT1O0Nr7TSxp8xM5tJcdn8tgyAONntO829og1M=
+golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190606050223-4d9ae51c2468/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.0 h1:Tfd7cKwKbFRsI8RMAD3oqqw7JPFRrvFlOsfbgVkjOOw=
+google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/dgrijalva/jwt-go.v3 v3.2.0 h1:N46iQqOtHry7Hxzb9PGrP68oovQmj7EhudNoKHvbOvI=
+gopkg.in/dgrijalva/jwt-go.v3 v3.2.0/go.mod h1:hdNXC2Z9yC029rvsQ/on2ZNQ44Z2XToVhpXXbR+J05A=
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU=
+gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+xorm.io/builder v0.3.5 h1:EilU39fvWDxjb1cDaELpYhsF+zziRBhew8xk4pngO+A=
+xorm.io/builder v0.3.5/go.mod h1:ZFbByS/KxZI1FKRjL05PyJ4YrK2bcxlUaAxdum5aTR8=
+xorm.io/core v0.7.0 h1:hKxuOKWZNeiFQsSuGet/KV8HZ788hclvAl+7azx3tkM=
+xorm.io/core v0.7.0/go.mod h1:TuOJjIVa7e3w/rN8tDcAvuLBMtwzdHPbyOzE6Gk1EUI=
diff --git a/vendor/github.com/bradfitz/gomemcache/LICENSE b/vendor/github.com/bradfitz/gomemcache/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/bradfitz/gomemcache/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go b/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go
new file mode 100644
index 0000000..25e88ca
--- /dev/null
+++ b/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go
@@ -0,0 +1,687 @@
+/*
+Copyright 2011 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package memcache provides a client for the memcached cache server.
+package memcache
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+// Similar to:
+// https://godoc.org/google.golang.org/appengine/memcache
+
+var (
+ // ErrCacheMiss means that a Get failed because the item wasn't present.
+ ErrCacheMiss = errors.New("memcache: cache miss")
+
+ // ErrCASConflict means that a CompareAndSwap call failed due to the
+ // cached value being modified between the Get and the CompareAndSwap.
+ // If the cached value was simply evicted rather than replaced,
+ // ErrNotStored will be returned instead.
+ ErrCASConflict = errors.New("memcache: compare-and-swap conflict")
+
+ // ErrNotStored means that a conditional write operation (i.e. Add or
+ // CompareAndSwap) failed because the condition was not satisfied.
+ ErrNotStored = errors.New("memcache: item not stored")
+
+ // ErrServer means that a server error occurred.
+ ErrServerError = errors.New("memcache: server error")
+
+ // ErrNoStats means that no statistics were available.
+ ErrNoStats = errors.New("memcache: no statistics available")
+
+ // ErrMalformedKey is returned when an invalid key is used.
+ // Keys must be at maximum 250 bytes long and not
+ // contain whitespace or control characters.
+ ErrMalformedKey = errors.New("malformed: key is too long or contains invalid characters")
+
+ // ErrNoServers is returned when no servers are configured or available.
+ ErrNoServers = errors.New("memcache: no servers configured or available")
+)
+
+const (
+ // DefaultTimeout is the default socket read/write timeout.
+ DefaultTimeout = 100 * time.Millisecond
+
+ // DefaultMaxIdleConns is the default maximum number of idle connections
+ // kept for any single address.
+ DefaultMaxIdleConns = 2
+)
+
+const buffered = 8 // arbitrary buffered channel size, for readability
+
+// resumableError returns true if err is only a protocol-level cache error.
+// This is used to determine whether or not a server connection should
+// be re-used or not. If an error occurs, by default we don't reuse the
+// connection, unless it was just a cache error.
+func resumableError(err error) bool {
+ switch err {
+ case ErrCacheMiss, ErrCASConflict, ErrNotStored, ErrMalformedKey:
+ return true
+ }
+ return false
+}
+
+func legalKey(key string) bool {
+ if len(key) > 250 {
+ return false
+ }
+ for i := 0; i < len(key); i++ {
+ if key[i] <= ' ' || key[i] == 0x7f {
+ return false
+ }
+ }
+ return true
+}
+
+var (
+ crlf = []byte("\r\n")
+ space = []byte(" ")
+ resultOK = []byte("OK\r\n")
+ resultStored = []byte("STORED\r\n")
+ resultNotStored = []byte("NOT_STORED\r\n")
+ resultExists = []byte("EXISTS\r\n")
+ resultNotFound = []byte("NOT_FOUND\r\n")
+ resultDeleted = []byte("DELETED\r\n")
+ resultEnd = []byte("END\r\n")
+ resultOk = []byte("OK\r\n")
+ resultTouched = []byte("TOUCHED\r\n")
+
+ resultClientErrorPrefix = []byte("CLIENT_ERROR ")
+)
+
+// New returns a memcache client using the provided server(s)
+// with equal weight. If a server is listed multiple times,
+// it gets a proportional amount of weight.
+func New(server ...string) *Client {
+ ss := new(ServerList)
+ ss.SetServers(server...)
+ return NewFromSelector(ss)
+}
+
+// NewFromSelector returns a new Client using the provided ServerSelector.
+func NewFromSelector(ss ServerSelector) *Client {
+ return &Client{selector: ss}
+}
+
+// Client is a memcache client.
+// It is safe for unlocked use by multiple concurrent goroutines.
+type Client struct {
+ // Timeout specifies the socket read/write timeout.
+ // If zero, DefaultTimeout is used.
+ Timeout time.Duration
+
+ // MaxIdleConns specifies the maximum number of idle connections that will
+ // be maintained per address. If less than one, DefaultMaxIdleConns will be
+ // used.
+ //
+ // Consider your expected traffic rates and latency carefully. This should
+ // be set to a number higher than your peak parallel requests.
+ MaxIdleConns int
+
+ selector ServerSelector
+
+ lk sync.Mutex
+ freeconn map[string][]*conn
+}
+
+// Item is an item to be got or stored in a memcached server.
+type Item struct {
+ // Key is the Item's key (250 bytes maximum).
+ Key string
+
+ // Value is the Item's value.
+ Value []byte
+
+ // Flags are server-opaque flags whose semantics are entirely
+ // up to the app.
+ Flags uint32
+
+ // Expiration is the cache expiration time, in seconds: either a relative
+ // time from now (up to 1 month), or an absolute Unix epoch time.
+ // Zero means the Item has no expiration time.
+ Expiration int32
+
+ // Compare and swap ID.
+ casid uint64
+}
+
+// conn is a connection to a server.
+type conn struct {
+ nc net.Conn
+ rw *bufio.ReadWriter
+ addr net.Addr
+ c *Client
+}
+
+// release returns this connection back to the client's free pool
+func (cn *conn) release() {
+ cn.c.putFreeConn(cn.addr, cn)
+}
+
+func (cn *conn) extendDeadline() {
+ cn.nc.SetDeadline(time.Now().Add(cn.c.netTimeout()))
+}
+
+// condRelease releases this connection if the error pointed to by err
+// is nil (not an error) or is only a protocol level error (e.g. a
+// cache miss). The purpose is to not recycle TCP connections that
+// are bad.
+func (cn *conn) condRelease(err *error) {
+ if *err == nil || resumableError(*err) {
+ cn.release()
+ } else {
+ cn.nc.Close()
+ }
+}
+
+func (c *Client) putFreeConn(addr net.Addr, cn *conn) {
+ c.lk.Lock()
+ defer c.lk.Unlock()
+ if c.freeconn == nil {
+ c.freeconn = make(map[string][]*conn)
+ }
+ freelist := c.freeconn[addr.String()]
+ if len(freelist) >= c.maxIdleConns() {
+ cn.nc.Close()
+ return
+ }
+ c.freeconn[addr.String()] = append(freelist, cn)
+}
+
+func (c *Client) getFreeConn(addr net.Addr) (cn *conn, ok bool) {
+ c.lk.Lock()
+ defer c.lk.Unlock()
+ if c.freeconn == nil {
+ return nil, false
+ }
+ freelist, ok := c.freeconn[addr.String()]
+ if !ok || len(freelist) == 0 {
+ return nil, false
+ }
+ cn = freelist[len(freelist)-1]
+ c.freeconn[addr.String()] = freelist[:len(freelist)-1]
+ return cn, true
+}
+
+func (c *Client) netTimeout() time.Duration {
+ if c.Timeout != 0 {
+ return c.Timeout
+ }
+ return DefaultTimeout
+}
+
+func (c *Client) maxIdleConns() int {
+ if c.MaxIdleConns > 0 {
+ return c.MaxIdleConns
+ }
+ return DefaultMaxIdleConns
+}
+
+// ConnectTimeoutError is the error type used when it takes
+// too long to connect to the desired host. This level of
+// detail can generally be ignored.
+type ConnectTimeoutError struct {
+ Addr net.Addr
+}
+
+func (cte *ConnectTimeoutError) Error() string {
+ return "memcache: connect timeout to " + cte.Addr.String()
+}
+
+func (c *Client) dial(addr net.Addr) (net.Conn, error) {
+ type connError struct {
+ cn net.Conn
+ err error
+ }
+
+ nc, err := net.DialTimeout(addr.Network(), addr.String(), c.netTimeout())
+ if err == nil {
+ return nc, nil
+ }
+
+ if ne, ok := err.(net.Error); ok && ne.Timeout() {
+ return nil, &ConnectTimeoutError{addr}
+ }
+
+ return nil, err
+}
+
+func (c *Client) getConn(addr net.Addr) (*conn, error) {
+ cn, ok := c.getFreeConn(addr)
+ if ok {
+ cn.extendDeadline()
+ return cn, nil
+ }
+ nc, err := c.dial(addr)
+ if err != nil {
+ return nil, err
+ }
+ cn = &conn{
+ nc: nc,
+ addr: addr,
+ rw: bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)),
+ c: c,
+ }
+ cn.extendDeadline()
+ return cn, nil
+}
+
+func (c *Client) onItem(item *Item, fn func(*Client, *bufio.ReadWriter, *Item) error) error {
+ addr, err := c.selector.PickServer(item.Key)
+ if err != nil {
+ return err
+ }
+ cn, err := c.getConn(addr)
+ if err != nil {
+ return err
+ }
+ defer cn.condRelease(&err)
+ if err = fn(c, cn.rw, item); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (c *Client) FlushAll() error {
+ return c.selector.Each(c.flushAllFromAddr)
+}
+
+// Get gets the item for the given key. ErrCacheMiss is returned for a
+// memcache cache miss. The key must be at most 250 bytes in length.
+func (c *Client) Get(key string) (item *Item, err error) {
+ err = c.withKeyAddr(key, func(addr net.Addr) error {
+ return c.getFromAddr(addr, []string{key}, func(it *Item) { item = it })
+ })
+ if err == nil && item == nil {
+ err = ErrCacheMiss
+ }
+ return
+}
+
+// Touch updates the expiry for the given key. The seconds parameter is either
+// a Unix timestamp or, if seconds is less than 1 month, the number of seconds
+// into the future at which time the item will expire. Zero means the item has
+// no expiration time. ErrCacheMiss is returned if the key is not in the cache.
+// The key must be at most 250 bytes in length.
+func (c *Client) Touch(key string, seconds int32) (err error) {
+ return c.withKeyAddr(key, func(addr net.Addr) error {
+ return c.touchFromAddr(addr, []string{key}, seconds)
+ })
+}
+
+func (c *Client) withKeyAddr(key string, fn func(net.Addr) error) (err error) {
+ if !legalKey(key) {
+ return ErrMalformedKey
+ }
+ addr, err := c.selector.PickServer(key)
+ if err != nil {
+ return err
+ }
+ return fn(addr)
+}
+
+func (c *Client) withAddrRw(addr net.Addr, fn func(*bufio.ReadWriter) error) (err error) {
+ cn, err := c.getConn(addr)
+ if err != nil {
+ return err
+ }
+ defer cn.condRelease(&err)
+ return fn(cn.rw)
+}
+
+func (c *Client) withKeyRw(key string, fn func(*bufio.ReadWriter) error) error {
+ return c.withKeyAddr(key, func(addr net.Addr) error {
+ return c.withAddrRw(addr, fn)
+ })
+}
+
+func (c *Client) getFromAddr(addr net.Addr, keys []string, cb func(*Item)) error {
+ return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
+ if _, err := fmt.Fprintf(rw, "gets %s\r\n", strings.Join(keys, " ")); err != nil {
+ return err
+ }
+ if err := rw.Flush(); err != nil {
+ return err
+ }
+ if err := parseGetResponse(rw.Reader, cb); err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+// flushAllFromAddr send the flush_all command to the given addr
+func (c *Client) flushAllFromAddr(addr net.Addr) error {
+ return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
+ if _, err := fmt.Fprintf(rw, "flush_all\r\n"); err != nil {
+ return err
+ }
+ if err := rw.Flush(); err != nil {
+ return err
+ }
+ line, err := rw.ReadSlice('\n')
+ if err != nil {
+ return err
+ }
+ switch {
+ case bytes.Equal(line, resultOk):
+ break
+ default:
+ return fmt.Errorf("memcache: unexpected response line from flush_all: %q", string(line))
+ }
+ return nil
+ })
+}
+
+func (c *Client) touchFromAddr(addr net.Addr, keys []string, expiration int32) error {
+ return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
+ for _, key := range keys {
+ if _, err := fmt.Fprintf(rw, "touch %s %d\r\n", key, expiration); err != nil {
+ return err
+ }
+ if err := rw.Flush(); err != nil {
+ return err
+ }
+ line, err := rw.ReadSlice('\n')
+ if err != nil {
+ return err
+ }
+ switch {
+ case bytes.Equal(line, resultTouched):
+ break
+ case bytes.Equal(line, resultNotFound):
+ return ErrCacheMiss
+ default:
+ return fmt.Errorf("memcache: unexpected response line from touch: %q", string(line))
+ }
+ }
+ return nil
+ })
+}
+
+// GetMulti is a batch version of Get. The returned map from keys to
+// items may have fewer elements than the input slice, due to memcache
+// cache misses. Each key must be at most 250 bytes in length.
+// If no error is returned, the returned map will also be non-nil.
+func (c *Client) GetMulti(keys []string) (map[string]*Item, error) {
+ var lk sync.Mutex
+ m := make(map[string]*Item)
+ addItemToMap := func(it *Item) {
+ lk.Lock()
+ defer lk.Unlock()
+ m[it.Key] = it
+ }
+
+ keyMap := make(map[net.Addr][]string)
+ for _, key := range keys {
+ if !legalKey(key) {
+ return nil, ErrMalformedKey
+ }
+ addr, err := c.selector.PickServer(key)
+ if err != nil {
+ return nil, err
+ }
+ keyMap[addr] = append(keyMap[addr], key)
+ }
+
+ ch := make(chan error, buffered)
+ for addr, keys := range keyMap {
+ go func(addr net.Addr, keys []string) {
+ ch <- c.getFromAddr(addr, keys, addItemToMap)
+ }(addr, keys)
+ }
+
+ var err error
+ for _ = range keyMap {
+ if ge := <-ch; ge != nil {
+ err = ge
+ }
+ }
+ return m, err
+}
+
+// parseGetResponse reads a GET response from r and calls cb for each
+// read and allocated Item
+func parseGetResponse(r *bufio.Reader, cb func(*Item)) error {
+ for {
+ line, err := r.ReadSlice('\n')
+ if err != nil {
+ return err
+ }
+ if bytes.Equal(line, resultEnd) {
+ return nil
+ }
+ it := new(Item)
+ size, err := scanGetResponseLine(line, it)
+ if err != nil {
+ return err
+ }
+ it.Value = make([]byte, size+2)
+ _, err = io.ReadFull(r, it.Value)
+ if err != nil {
+ it.Value = nil
+ return err
+ }
+ if !bytes.HasSuffix(it.Value, crlf) {
+ it.Value = nil
+ return fmt.Errorf("memcache: corrupt get result read")
+ }
+ it.Value = it.Value[:size]
+ cb(it)
+ }
+}
+
+// scanGetResponseLine populates it and returns the declared size of the item.
+// It does not read the bytes of the item.
+func scanGetResponseLine(line []byte, it *Item) (size int, err error) {
+ pattern := "VALUE %s %d %d %d\r\n"
+ dest := []interface{}{&it.Key, &it.Flags, &size, &it.casid}
+ if bytes.Count(line, space) == 3 {
+ pattern = "VALUE %s %d %d\r\n"
+ dest = dest[:3]
+ }
+ n, err := fmt.Sscanf(string(line), pattern, dest...)
+ if err != nil || n != len(dest) {
+ return -1, fmt.Errorf("memcache: unexpected line in get response: %q", line)
+ }
+ return size, nil
+}
+
+// Set writes the given item, unconditionally.
+func (c *Client) Set(item *Item) error {
+ return c.onItem(item, (*Client).set)
+}
+
+func (c *Client) set(rw *bufio.ReadWriter, item *Item) error {
+ return c.populateOne(rw, "set", item)
+}
+
+// Add writes the given item, if no value already exists for its
+// key. ErrNotStored is returned if that condition is not met.
+func (c *Client) Add(item *Item) error {
+ return c.onItem(item, (*Client).add)
+}
+
+func (c *Client) add(rw *bufio.ReadWriter, item *Item) error {
+ return c.populateOne(rw, "add", item)
+}
+
+// Replace writes the given item, but only if the server *does*
+// already hold data for this key
+func (c *Client) Replace(item *Item) error {
+ return c.onItem(item, (*Client).replace)
+}
+
+func (c *Client) replace(rw *bufio.ReadWriter, item *Item) error {
+ return c.populateOne(rw, "replace", item)
+}
+
+// CompareAndSwap writes the given item that was previously returned
+// by Get, if the value was neither modified or evicted between the
+// Get and the CompareAndSwap calls. The item's Key should not change
+// between calls but all other item fields may differ. ErrCASConflict
+// is returned if the value was modified in between the
+// calls. ErrNotStored is returned if the value was evicted in between
+// the calls.
+func (c *Client) CompareAndSwap(item *Item) error {
+ return c.onItem(item, (*Client).cas)
+}
+
+func (c *Client) cas(rw *bufio.ReadWriter, item *Item) error {
+ return c.populateOne(rw, "cas", item)
+}
+
+func (c *Client) populateOne(rw *bufio.ReadWriter, verb string, item *Item) error {
+ if !legalKey(item.Key) {
+ return ErrMalformedKey
+ }
+ var err error
+ if verb == "cas" {
+ _, err = fmt.Fprintf(rw, "%s %s %d %d %d %d\r\n",
+ verb, item.Key, item.Flags, item.Expiration, len(item.Value), item.casid)
+ } else {
+ _, err = fmt.Fprintf(rw, "%s %s %d %d %d\r\n",
+ verb, item.Key, item.Flags, item.Expiration, len(item.Value))
+ }
+ if err != nil {
+ return err
+ }
+ if _, err = rw.Write(item.Value); err != nil {
+ return err
+ }
+ if _, err := rw.Write(crlf); err != nil {
+ return err
+ }
+ if err := rw.Flush(); err != nil {
+ return err
+ }
+ line, err := rw.ReadSlice('\n')
+ if err != nil {
+ return err
+ }
+ switch {
+ case bytes.Equal(line, resultStored):
+ return nil
+ case bytes.Equal(line, resultNotStored):
+ return ErrNotStored
+ case bytes.Equal(line, resultExists):
+ return ErrCASConflict
+ case bytes.Equal(line, resultNotFound):
+ return ErrCacheMiss
+ }
+ return fmt.Errorf("memcache: unexpected response line from %q: %q", verb, string(line))
+}
+
+func writeReadLine(rw *bufio.ReadWriter, format string, args ...interface{}) ([]byte, error) {
+ _, err := fmt.Fprintf(rw, format, args...)
+ if err != nil {
+ return nil, err
+ }
+ if err := rw.Flush(); err != nil {
+ return nil, err
+ }
+ line, err := rw.ReadSlice('\n')
+ return line, err
+}
+
+func writeExpectf(rw *bufio.ReadWriter, expect []byte, format string, args ...interface{}) error {
+ line, err := writeReadLine(rw, format, args...)
+ if err != nil {
+ return err
+ }
+ switch {
+ case bytes.Equal(line, resultOK):
+ return nil
+ case bytes.Equal(line, expect):
+ return nil
+ case bytes.Equal(line, resultNotStored):
+ return ErrNotStored
+ case bytes.Equal(line, resultExists):
+ return ErrCASConflict
+ case bytes.Equal(line, resultNotFound):
+ return ErrCacheMiss
+ }
+ return fmt.Errorf("memcache: unexpected response line: %q", string(line))
+}
+
+// Delete deletes the item with the provided key. The error ErrCacheMiss is
+// returned if the item didn't already exist in the cache.
+func (c *Client) Delete(key string) error {
+ return c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
+ return writeExpectf(rw, resultDeleted, "delete %s\r\n", key)
+ })
+}
+
+// DeleteAll deletes all items in the cache.
+func (c *Client) DeleteAll() error {
+ return c.withKeyRw("", func(rw *bufio.ReadWriter) error {
+ return writeExpectf(rw, resultDeleted, "flush_all\r\n")
+ })
+}
+
+// Increment atomically increments key by delta. The return value is
+// the new value after being incremented or an error. If the value
+// didn't exist in memcached the error is ErrCacheMiss. The value in
+// memcached must be a decimal number, or an error will be returned.
+// On 64-bit overflow, the new value wraps around.
+func (c *Client) Increment(key string, delta uint64) (newValue uint64, err error) {
+ return c.incrDecr("incr", key, delta)
+}
+
+// Decrement atomically decrements key by delta. The return value is
+// the new value after being decremented or an error. If the value
+// didn't exist in memcached the error is ErrCacheMiss. The value in
+// memcached must be a decimal number, or an error will be returned.
+// On underflow, the new value is capped at zero and does not wrap
+// around.
+func (c *Client) Decrement(key string, delta uint64) (newValue uint64, err error) {
+ return c.incrDecr("decr", key, delta)
+}
+
+func (c *Client) incrDecr(verb, key string, delta uint64) (uint64, error) {
+ var val uint64
+ err := c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
+ line, err := writeReadLine(rw, "%s %s %d\r\n", verb, key, delta)
+ if err != nil {
+ return err
+ }
+ switch {
+ case bytes.Equal(line, resultNotFound):
+ return ErrCacheMiss
+ case bytes.HasPrefix(line, resultClientErrorPrefix):
+ errMsg := line[len(resultClientErrorPrefix) : len(line)-2]
+ return errors.New("memcache: client error: " + string(errMsg))
+ }
+ val, err = strconv.ParseUint(string(line[:len(line)-2]), 10, 64)
+ if err != nil {
+ return err
+ }
+ return nil
+ })
+ return val, err
+}
diff --git a/vendor/github.com/bradfitz/gomemcache/memcache/selector.go b/vendor/github.com/bradfitz/gomemcache/memcache/selector.go
new file mode 100644
index 0000000..89ad81e
--- /dev/null
+++ b/vendor/github.com/bradfitz/gomemcache/memcache/selector.go
@@ -0,0 +1,129 @@
+/*
+Copyright 2011 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package memcache
+
+import (
+ "hash/crc32"
+ "net"
+ "strings"
+ "sync"
+)
+
+// ServerSelector is the interface that selects a memcache server
+// as a function of the item's key.
+//
+// All ServerSelector implementations must be safe for concurrent use
+// by multiple goroutines.
+type ServerSelector interface {
+ // PickServer returns the server address that a given item
+ // should be shared onto.
+ PickServer(key string) (net.Addr, error)
+ Each(func(net.Addr) error) error
+}
+
+// ServerList is a simple ServerSelector. Its zero value is usable.
+type ServerList struct {
+ mu sync.RWMutex
+ addrs []net.Addr
+}
+
+// staticAddr caches the Network() and String() values from any net.Addr.
+type staticAddr struct {
+ ntw, str string
+}
+
+func newStaticAddr(a net.Addr) net.Addr {
+ return &staticAddr{
+ ntw: a.Network(),
+ str: a.String(),
+ }
+}
+
+func (s *staticAddr) Network() string { return s.ntw }
+func (s *staticAddr) String() string { return s.str }
+
+// SetServers changes a ServerList's set of servers at runtime and is
+// safe for concurrent use by multiple goroutines.
+//
+// Each server is given equal weight. A server is given more weight
+// if it's listed multiple times.
+//
+// SetServers returns an error if any of the server names fail to
+// resolve. No attempt is made to connect to the server. If any error
+// is returned, no changes are made to the ServerList.
+func (ss *ServerList) SetServers(servers ...string) error {
+ naddr := make([]net.Addr, len(servers))
+ for i, server := range servers {
+ if strings.Contains(server, "/") {
+ addr, err := net.ResolveUnixAddr("unix", server)
+ if err != nil {
+ return err
+ }
+ naddr[i] = newStaticAddr(addr)
+ } else {
+ tcpaddr, err := net.ResolveTCPAddr("tcp", server)
+ if err != nil {
+ return err
+ }
+ naddr[i] = newStaticAddr(tcpaddr)
+ }
+ }
+
+ ss.mu.Lock()
+ defer ss.mu.Unlock()
+ ss.addrs = naddr
+ return nil
+}
+
+// Each iterates over each server calling the given function
+func (ss *ServerList) Each(f func(net.Addr) error) error {
+ ss.mu.RLock()
+ defer ss.mu.RUnlock()
+ for _, a := range ss.addrs {
+ if err := f(a); nil != err {
+ return err
+ }
+ }
+ return nil
+}
+
+// keyBufPool returns []byte buffers for use by PickServer's call to
+// crc32.ChecksumIEEE to avoid allocations. (but doesn't avoid the
+// copies, which at least are bounded in size and small)
+var keyBufPool = sync.Pool{
+ New: func() interface{} {
+ b := make([]byte, 256)
+ return &b
+ },
+}
+
+func (ss *ServerList) PickServer(key string) (net.Addr, error) {
+ ss.mu.RLock()
+ defer ss.mu.RUnlock()
+ if len(ss.addrs) == 0 {
+ return nil, ErrNoServers
+ }
+ if len(ss.addrs) == 1 {
+ return ss.addrs[0], nil
+ }
+ bufp := keyBufPool.Get().(*[]byte)
+ n := copy(*bufp, key)
+ cs := crc32.ChecksumIEEE((*bufp)[:n])
+ keyBufPool.Put(bufp)
+
+ return ss.addrs[cs%uint32(len(ss.addrs))], nil
+}
diff --git a/vendor/github.com/couchbase/go-couchbase/.gitignore b/vendor/github.com/couchbase/go-couchbase/.gitignore
new file mode 100644
index 0000000..eda885c
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/.gitignore
@@ -0,0 +1,14 @@
+#*
+*.6
+*.a
+*~
+*.swp
+/examples/basic/basic
+/hello/hello
+/populate/populate
+/tools/view2go/view2go
+/tools/loadfile/loadfile
+gotags.files
+TAGS
+6.out
+_*
diff --git a/vendor/github.com/couchbase/go-couchbase/.travis.yml b/vendor/github.com/couchbase/go-couchbase/.travis.yml
new file mode 100644
index 0000000..4ecafb1
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/.travis.yml
@@ -0,0 +1,5 @@
+language: go
+install: go get -v -d ./... && go build -v ./...
+script: go test -v ./...
+
+go: 1.1.1
diff --git a/vendor/github.com/couchbase/go-couchbase/LICENSE b/vendor/github.com/couchbase/go-couchbase/LICENSE
new file mode 100644
index 0000000..0b23ef3
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2013 Couchbase, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/couchbase/go-couchbase/README.markdown b/vendor/github.com/couchbase/go-couchbase/README.markdown
new file mode 100644
index 0000000..bf5fe49
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/README.markdown
@@ -0,0 +1,37 @@
+# A smart client for couchbase in go
+
+This is an *unofficial* version of a Couchbase Golang client. If you are
+looking for the *Official* Couchbase Golang client please see
+ [CB-go](https://github.com/couchbaselabs/gocb).
+
+This is an evolving package, but does provide a useful interface to a
+[couchbase](http://www.couchbase.com/) server including all of the
+pool/bucket discovery features, compatible key distribution with other
+clients, and vbucket motion awareness so application can continue to
+operate during rebalances.
+
+It also supports view querying with source node randomization so you
+don't bang on all one node to do all the work.
+
+## Install
+
+ go get github.com/couchbase/go-couchbase
+
+## Example
+
+ c, err := couchbase.Connect("http://dev-couchbase.example.com:8091/")
+ if err != nil {
+ log.Fatalf("Error connecting: %v", err)
+ }
+
+ pool, err := c.GetPool("default")
+ if err != nil {
+ log.Fatalf("Error getting pool: %v", err)
+ }
+
+ bucket, err := pool.GetBucket("default")
+ if err != nil {
+ log.Fatalf("Error getting bucket: %v", err)
+ }
+
+ bucket.Set("someKey", 0, []string{"an", "example", "list"})
diff --git a/vendor/github.com/couchbase/go-couchbase/audit.go b/vendor/github.com/couchbase/go-couchbase/audit.go
new file mode 100644
index 0000000..3db7d9f
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/audit.go
@@ -0,0 +1,32 @@
+package couchbase
+
+import ()
+
+// Sample data:
+// {"disabled":["12333", "22244"],"uid":"132492431","auditdEnabled":true,
+// "disabledUsers":[{"name":"bill","domain":"local"},{"name":"bob","domain":"local"}],
+// "logPath":"/Users/johanlarson/Library/Application Support/Couchbase/var/lib/couchbase/logs",
+// "rotateInterval":86400,"rotateSize":20971520}
+type AuditSpec struct {
+ Disabled []uint32 `json:"disabled"`
+ Uid string `json:"uid"`
+	AuditdEnabled bool   `json:"auditdEnabled"`
+ DisabledUsers []AuditUser `json:"disabledUsers"`
+ LogPath string `json:"logPath"`
+ RotateInterval int64 `json:"rotateInterval"`
+ RotateSize int64 `json:"rotateSize"`
+}
+
+type AuditUser struct {
+ Name string `json:"name"`
+ Domain string `json:"domain"`
+}
+
+func (c *Client) GetAuditSpec() (*AuditSpec, error) {
+ ret := &AuditSpec{}
+ err := c.parseURLResponse("/settings/audit", ret)
+ if err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
diff --git a/vendor/github.com/couchbase/go-couchbase/client.go b/vendor/github.com/couchbase/go-couchbase/client.go
new file mode 100644
index 0000000..433b08f
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/client.go
@@ -0,0 +1,1477 @@
+/*
+Package couchbase provides a smart client for go.
+
+Usage:
+
+ client, err := couchbase.Connect("http://myserver:8091/")
+ handleError(err)
+ pool, err := client.GetPool("default")
+ handleError(err)
+ bucket, err := pool.GetBucket("MyAwesomeBucket")
+ handleError(err)
+ ...
+
+or a shortcut for the bucket directly
+
+ bucket, err := couchbase.GetBucket("http://myserver:8091/", "default", "default")
+
+in any case, you can specify authentication credentials using
+standard URL userinfo syntax:
+
+ b, err := couchbase.GetBucket("http://bucketname:bucketpass@myserver:8091/",
+ "default", "bucket")
+*/
+package couchbase
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+ "unsafe"
+
+ "github.com/couchbase/gomemcached"
+ "github.com/couchbase/gomemcached/client" // package name is 'memcached'
+ "github.com/couchbase/goutils/logging"
+)
+
+// Mutation Token
+type MutationToken struct {
+ VBid uint16 // vbucket id
+ Guard uint64 // vbuuid
+ Value uint64 // sequence number
+}
+
+// Maximum number of times to retry a chunk of a bulk get on error.
+var MaxBulkRetries = 5000
+var backOffDuration time.Duration = 100 * time.Millisecond
+var MaxBackOffRetries = 25 // exponential backOff results in over 30sec (25*13*0.1s)
+
+// If this is set to a nonzero duration, Do() and ViewCustom() will log a warning if the call
+// takes longer than that.
+var SlowServerCallWarningThreshold time.Duration
+
+func slowLog(startTime time.Time, format string, args ...interface{}) {
+ if elapsed := time.Now().Sub(startTime); elapsed > SlowServerCallWarningThreshold {
+ pc, _, _, _ := runtime.Caller(2)
+ caller := runtime.FuncForPC(pc).Name()
+ logging.Infof("go-couchbase: "+format+" in "+caller+" took "+elapsed.String(), args...)
+ }
+}
+
+// Return true if error is KEY_EEXISTS. Required by cbq-engine
+func IsKeyEExistsError(err error) bool {
+
+ res, ok := err.(*gomemcached.MCResponse)
+ if ok && res.Status == gomemcached.KEY_EEXISTS {
+ return true
+ }
+
+ return false
+}
+
+// Return true if error is KEY_ENOENT. Required by cbq-engine
+func IsKeyNoEntError(err error) bool {
+
+ res, ok := err.(*gomemcached.MCResponse)
+ if ok && res.Status == gomemcached.KEY_ENOENT {
+ return true
+ }
+
+ return false
+}
+
+// Return true if error suggests a bucket refresh is required. Required by cbq-engine
+func IsRefreshRequired(err error) bool {
+
+ res, ok := err.(*gomemcached.MCResponse)
+ if ok && (res.Status == gomemcached.NO_BUCKET || res.Status == gomemcached.NOT_MY_VBUCKET) {
+ return true
+ }
+
+ return false
+}
+
+// ClientOpCallback is called for each invocation of Do.
+var ClientOpCallback func(opname, k string, start time.Time, err error)
+
+// Do executes a function on a memcached connection to the node owning key "k"
+//
+// Note that this automatically handles transient errors by replaying
+// your function on a "not-my-vbucket" error, so don't assume
+// your command will only be executed only once.
+func (b *Bucket) Do(k string, f func(mc *memcached.Client, vb uint16) error) (err error) {
+ return b.Do2(k, f, true)
+}
+
+func (b *Bucket) Do2(k string, f func(mc *memcached.Client, vb uint16) error, deadline bool) (err error) {
+ if SlowServerCallWarningThreshold > 0 {
+ defer slowLog(time.Now(), "call to Do(%q)", k)
+ }
+
+ vb := b.VBHash(k)
+ maxTries := len(b.Nodes()) * 2
+ for i := 0; i < maxTries; i++ {
+ conn, pool, err := b.getConnectionToVBucket(vb)
+ if err != nil {
+ if isConnError(err) && backOff(i, maxTries, backOffDuration, true) {
+ b.Refresh()
+ continue
+ }
+ return err
+ }
+
+ if deadline && DefaultTimeout > 0 {
+ conn.SetDeadline(getDeadline(noDeadline, DefaultTimeout))
+ err = f(conn, uint16(vb))
+ conn.SetDeadline(noDeadline)
+ } else {
+ err = f(conn, uint16(vb))
+ }
+
+ var retry bool
+ discard := isOutOfBoundsError(err)
+
+ // MB-30967 / MB-31001 implement back off for transient errors
+ if resp, ok := err.(*gomemcached.MCResponse); ok {
+ switch resp.Status {
+ case gomemcached.NOT_MY_VBUCKET:
+ b.Refresh()
+ // MB-28842: in case of NMVB, check if the node is still part of the map
+ // and ditch the connection if it isn't.
+ discard = b.checkVBmap(pool.Node())
+ retry = true
+ case gomemcached.NOT_SUPPORTED:
+ discard = true
+ retry = true
+ case gomemcached.ENOMEM:
+ fallthrough
+ case gomemcached.TMPFAIL:
+ retry = backOff(i, maxTries, backOffDuration, true)
+ default:
+ retry = false
+ }
+ } else if err != nil && isConnError(err) && backOff(i, maxTries, backOffDuration, true) {
+ retry = true
+ }
+
+ if discard {
+ pool.Discard(conn)
+ } else {
+ pool.Return(conn)
+ }
+
+ if !retry {
+ return err
+ }
+ }
+
+	return fmt.Errorf("unable to complete action after %v attempts", maxTries)
+}
+
+type GatheredStats struct {
+ Server string
+ Stats map[string]string
+ Err error
+}
+
+func getStatsParallel(sn string, b *Bucket, offset int, which string,
+ ch chan<- GatheredStats) {
+ pool := b.getConnPool(offset)
+ var gatheredStats GatheredStats
+
+ conn, err := pool.Get()
+ defer func() {
+ pool.Return(conn)
+ ch <- gatheredStats
+ }()
+
+ if err != nil {
+ gatheredStats = GatheredStats{Server: sn, Err: err}
+ } else {
+ sm, err := conn.StatsMap(which)
+ gatheredStats = GatheredStats{Server: sn, Stats: sm, Err: err}
+ }
+}
+
+// GetStats gets a set of stats from all servers.
+//
+// Returns a map of server ID -> map of stat key to map value.
+func (b *Bucket) GetStats(which string) map[string]map[string]string {
+ rv := map[string]map[string]string{}
+ for server, gs := range b.GatherStats(which) {
+ if len(gs.Stats) > 0 {
+ rv[server] = gs.Stats
+ }
+ }
+ return rv
+}
+
+// GatherStats returns a map of server ID -> GatheredStats from all servers.
+func (b *Bucket) GatherStats(which string) map[string]GatheredStats {
+ vsm := b.VBServerMap()
+ if vsm.ServerList == nil {
+ return nil
+ }
+
+ // Go grab all the things at once.
+ ch := make(chan GatheredStats, len(vsm.ServerList))
+ for i, sn := range vsm.ServerList {
+ go getStatsParallel(sn, b, i, which, ch)
+ }
+
+ // Gather the results
+ rv := map[string]GatheredStats{}
+ for range vsm.ServerList {
+ gs := <-ch
+ rv[gs.Server] = gs
+ }
+ return rv
+}
+
+// Get bucket count through the bucket stats
+func (b *Bucket) GetCount(refresh bool) (count int64, err error) {
+ if refresh {
+ b.Refresh()
+ }
+
+ var cnt int64
+ for _, gs := range b.GatherStats("") {
+ if len(gs.Stats) > 0 {
+ cnt, err = strconv.ParseInt(gs.Stats["curr_items"], 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ count += cnt
+ }
+ }
+
+ return count, nil
+}
+
+// Get bucket document size through the bucket stats
+func (b *Bucket) GetSize(refresh bool) (size int64, err error) {
+ if refresh {
+ b.Refresh()
+ }
+
+ var sz int64
+ for _, gs := range b.GatherStats("") {
+ if len(gs.Stats) > 0 {
+ sz, err = strconv.ParseInt(gs.Stats["ep_value_size"], 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ size += sz
+ }
+ }
+
+ return size, nil
+}
+
+func isAuthError(err error) bool {
+ estr := err.Error()
+ return strings.Contains(estr, "Auth failure")
+}
+
+func IsReadTimeOutError(err error) bool {
+ estr := err.Error()
+ return strings.Contains(estr, "read tcp") ||
+ strings.Contains(estr, "i/o timeout")
+}
+
+func isTimeoutError(err error) bool {
+ estr := err.Error()
+ return strings.Contains(estr, "i/o timeout") ||
+ strings.Contains(estr, "connection timed out") ||
+ strings.Contains(estr, "no route to host")
+}
+
+// Errors that are not considered fatal for our fetch loop
+func isConnError(err error) bool {
+ if err == io.EOF {
+ return true
+ }
+ estr := err.Error()
+ return strings.Contains(estr, "broken pipe") ||
+ strings.Contains(estr, "connection reset") ||
+ strings.Contains(estr, "connection refused") ||
+ strings.Contains(estr, "connection pool is closed")
+}
+
+func isOutOfBoundsError(err error) bool {
+ return err != nil && strings.Contains(err.Error(), "Out of Bounds error")
+
+}
+
+func getDeadline(reqDeadline time.Time, duration time.Duration) time.Time {
+ if reqDeadline.IsZero() && duration > 0 {
+ return time.Now().Add(duration)
+ }
+ return reqDeadline
+}
+
+func backOff(attempt, maxAttempts int, duration time.Duration, exponential bool) bool {
+ if attempt < maxAttempts {
+ // 0th attempt return immediately
+ if attempt > 0 {
+ if exponential {
+ duration = time.Duration(attempt) * duration
+ }
+ time.Sleep(duration)
+ }
+ return true
+ }
+
+ return false
+}
+
+func (b *Bucket) doBulkGet(vb uint16, keys []string, reqDeadline time.Time,
+ ch chan<- map[string]*gomemcached.MCResponse, ech chan<- error, subPaths []string,
+ eStatus *errorStatus) {
+ if SlowServerCallWarningThreshold > 0 {
+ defer slowLog(time.Now(), "call to doBulkGet(%d, %d keys)", vb, len(keys))
+ }
+
+ rv := _STRING_MCRESPONSE_POOL.Get()
+ attempts := 0
+ backOffAttempts := 0
+ done := false
+ bname := b.Name
+ for ; attempts < MaxBulkRetries && !done && !eStatus.errStatus; attempts++ {
+
+ if len(b.VBServerMap().VBucketMap) < int(vb) {
+ //fatal
+ err := fmt.Errorf("vbmap smaller than requested for %v", bname)
+ logging.Errorf("go-couchbase: %v vb %d vbmap len %d", err.Error(), vb, len(b.VBServerMap().VBucketMap))
+ ech <- err
+ return
+ }
+
+ masterID := b.VBServerMap().VBucketMap[vb][0]
+
+ if masterID < 0 {
+ // fatal
+ err := fmt.Errorf("No master node available for %v vb %d", bname, vb)
+ logging.Errorf("%v", err.Error())
+ ech <- err
+ return
+ }
+
+ // This stack frame exists to ensure we can clean up
+ // connection at a reasonable time.
+ err := func() error {
+ pool := b.getConnPool(masterID)
+ conn, err := pool.Get()
+ if err != nil {
+ if isAuthError(err) || isTimeoutError(err) {
+ logging.Errorf("Fatal Error %v : %v", bname, err)
+ ech <- err
+ return err
+ } else if isConnError(err) {
+ if !backOff(backOffAttempts, MaxBackOffRetries, backOffDuration, true) {
+ logging.Errorf("Connection Error %v : %v", bname, err)
+ ech <- err
+ return err
+ }
+ b.Refresh()
+ backOffAttempts++
+ }
+ logging.Infof("Pool Get returned %v: %v", bname, err)
+ // retry
+ return nil
+ }
+
+ conn.SetDeadline(getDeadline(reqDeadline, DefaultTimeout))
+ err = conn.GetBulk(vb, keys, rv, subPaths)
+ conn.SetDeadline(noDeadline)
+
+ discard := false
+ defer func() {
+ if discard {
+ pool.Discard(conn)
+ } else {
+ pool.Return(conn)
+ }
+ }()
+
+ switch err.(type) {
+ case *gomemcached.MCResponse:
+ notSMaxTries := len(b.Nodes()) * 2
+ st := err.(*gomemcached.MCResponse).Status
+ if st == gomemcached.NOT_MY_VBUCKET || (st == gomemcached.NOT_SUPPORTED && attempts < notSMaxTries) {
+ b.Refresh()
+ discard = b.checkVBmap(pool.Node())
+ return nil // retry
+ } else if st == gomemcached.EBUSY || st == gomemcached.LOCKED {
+ if (attempts % (MaxBulkRetries / 100)) == 0 {
+ logging.Infof("Retrying Memcached error (%v) FOR %v(vbid:%d, keys:%v)",
+ err.Error(), bname, vb, keys)
+ }
+ return nil // retry
+ } else if (st == gomemcached.ENOMEM || st == gomemcached.TMPFAIL) && backOff(backOffAttempts, MaxBackOffRetries, backOffDuration, true) {
+ // MB-30967 / MB-31001 use backoff for TMPFAIL too
+ backOffAttempts++
+ logging.Infof("Retrying Memcached error (%v) FOR %v(vbid:%d, keys:%v)",
+ err.Error(), bname, vb, keys)
+ return nil // retry
+ }
+ ech <- err
+ return err
+ case error:
+ if isOutOfBoundsError(err) {
+ // We got an out of bound error, retry the operation
+ discard = true
+ return nil
+ } else if isConnError(err) && backOff(backOffAttempts, MaxBackOffRetries, backOffDuration, true) {
+ backOffAttempts++
+ logging.Errorf("Connection Error: %s. Refreshing bucket %v (vbid:%v,keys:%v)",
+ err.Error(), bname, vb, keys)
+ discard = true
+ b.Refresh()
+ return nil // retry
+ }
+ ech <- err
+ ch <- rv
+ return err
+ }
+
+ done = true
+ return nil
+ }()
+
+ if err != nil {
+ return
+ }
+ }
+
+ if attempts >= MaxBulkRetries {
+ err := fmt.Errorf("bulkget exceeded MaxBulkRetries for %v(vbid:%d,keys:%v)", bname, vb, keys)
+ logging.Errorf("%v", err.Error())
+ ech <- err
+ }
+
+ ch <- rv
+}
+
+type errorStatus struct {
+ errStatus bool
+}
+
+type vbBulkGet struct {
+ b *Bucket
+ ch chan<- map[string]*gomemcached.MCResponse
+ ech chan<- error
+ k uint16
+ keys []string
+ reqDeadline time.Time
+ wg *sync.WaitGroup
+ subPaths []string
+ groupError *errorStatus
+}
+
+const _NUM_CHANNELS = 5
+
+var _NUM_CHANNEL_WORKERS = (runtime.NumCPU() + 1) / 2
+var DefaultDialTimeout = time.Duration(0)
+var DefaultTimeout = time.Duration(0)
+var noDeadline = time.Time{}
+
+// Buffer 4k requests per worker
+var _VB_BULK_GET_CHANNELS []chan *vbBulkGet
+
+func InitBulkGet() {
+
+ DefaultDialTimeout = 20 * time.Second
+ DefaultTimeout = 120 * time.Second
+
+ memcached.SetDefaultDialTimeout(DefaultDialTimeout)
+
+ _VB_BULK_GET_CHANNELS = make([]chan *vbBulkGet, _NUM_CHANNELS)
+
+ for i := 0; i < _NUM_CHANNELS; i++ {
+ channel := make(chan *vbBulkGet, 16*1024*_NUM_CHANNEL_WORKERS)
+ _VB_BULK_GET_CHANNELS[i] = channel
+
+ for j := 0; j < _NUM_CHANNEL_WORKERS; j++ {
+ go vbBulkGetWorker(channel)
+ }
+ }
+}
+
+func vbBulkGetWorker(ch chan *vbBulkGet) {
+ defer func() {
+ // Workers cannot panic and die
+ recover()
+ go vbBulkGetWorker(ch)
+ }()
+
+ for vbg := range ch {
+ vbDoBulkGet(vbg)
+ }
+}
+
+func vbDoBulkGet(vbg *vbBulkGet) {
+ defer vbg.wg.Done()
+ defer func() {
+ // Workers cannot panic and die
+ recover()
+ }()
+ vbg.b.doBulkGet(vbg.k, vbg.keys, vbg.reqDeadline, vbg.ch, vbg.ech, vbg.subPaths, vbg.groupError)
+}
+
+var _ERR_CHAN_FULL = fmt.Errorf("Data request queue full, aborting query.")
+
+func (b *Bucket) processBulkGet(kdm map[uint16][]string, reqDeadline time.Time,
+ ch chan<- map[string]*gomemcached.MCResponse, ech chan<- error, subPaths []string,
+ eStatus *errorStatus) {
+
+ defer close(ch)
+ defer close(ech)
+
+ wg := &sync.WaitGroup{}
+
+ for k, keys := range kdm {
+
+		// GetBulk() group has an error; do not queue items for this group
+ if eStatus.errStatus {
+ break
+ }
+
+ vbg := &vbBulkGet{
+ b: b,
+ ch: ch,
+ ech: ech,
+ k: k,
+ keys: keys,
+ reqDeadline: reqDeadline,
+ wg: wg,
+ subPaths: subPaths,
+ groupError: eStatus,
+ }
+
+ wg.Add(1)
+
+ // Random int
+ // Right shift to avoid 8-byte alignment, and take low bits
+ c := (uintptr(unsafe.Pointer(vbg)) >> 4) % _NUM_CHANNELS
+
+ select {
+ case _VB_BULK_GET_CHANNELS[c] <- vbg:
+ // No-op
+ default:
+ // Buffer full, abandon the bulk get
+ ech <- _ERR_CHAN_FULL
+ wg.Add(-1)
+ }
+ }
+
+ // Wait for my vb bulk gets
+ wg.Wait()
+}
+
+// multiError aggregates the individual errors produced by a concurrent
+// bulk-get so they can be reported to the caller as a single error value.
+type multiError []error
+
+// Error describes the collection: the number of errors plus the text of
+// the first one. It panics on an empty multiError, which errorCollector
+// never constructs.
+func (m multiError) Error() string {
+	if len(m) == 0 {
+		panic("Error of none")
+	}
+
+	return fmt.Sprintf("{%v errors, starting with %v}", len(m), m[0].Error())
+}
+
+// Convert a stream of errors from ech into a multiError (or nil) and
+// send down eout.
+//
+// At least one send is guaranteed on eout, but two is possible, so
+// buffer the out channel appropriately.
+func errorCollector(ech <-chan error, eout chan<- error, eStatus *errorStatus) {
+	// The trailing nil send signals "collector finished" to the reader.
+	defer func() { eout <- nil }()
+	var errs multiError
+	for e := range ech {
+		// Flag the first error that is not a key-not-found so the
+		// bulk-get workers can react to "real" failures.
+		if !eStatus.errStatus && !IsKeyNoEntError(e) {
+			eStatus.errStatus = true
+		}
+
+		errs = append(errs, e)
+	}
+
+	if len(errs) > 0 {
+		eout <- errs
+	}
+}
+
+// Fetches multiple keys concurrently, with []byte values
+//
+// This is a wrapper around GetBulk which converts all values returned
+// by GetBulk from raw memcached responses into []byte slices.
+// Returns one document for duplicate keys
+func (b *Bucket) GetBulkRaw(keys []string) (map[string][]byte, error) {
+
+	resp, eout := b.getBulk(keys, noDeadline, nil)
+
+	rv := make(map[string][]byte, len(keys))
+	for k, av := range resp {
+		rv[k] = av.Body // keep only the bodies; CAS/flags are discarded
+	}
+
+	// Return the response map to its pool; rv still references the bodies.
+	b.ReleaseGetBulkPools(resp)
+	return rv, eout
+
+}
+
+// GetBulk fetches multiple keys concurrently.
+//
+// Unlike more convenient GETs, the entire response is returned in the
+// map array for each key. Keys that were not found will not be included in
+// the map.
+
+func (b *Bucket) GetBulk(keys []string, reqDeadline time.Time, subPaths []string) (map[string]*gomemcached.MCResponse, error) {
+	return b.getBulk(keys, reqDeadline, subPaths)
+}
+
+// ReleaseGetBulkPools returns a map obtained from GetBulk to the shared
+// response pool. The map must not be used after this call.
+func (b *Bucket) ReleaseGetBulkPools(rv map[string]*gomemcached.MCResponse) {
+	_STRING_MCRESPONSE_POOL.Put(rv)
+}
+
+// getBulk groups the keys by vbucket, fans the fetches out via
+// processBulkGet, and merges the per-batch result maps into one.
+func (b *Bucket) getBulk(keys []string, reqDeadline time.Time, subPaths []string) (map[string]*gomemcached.MCResponse, error) {
+	// kdm maps vbucket id -> keys hashing to it (pooled allocations).
+	kdm := _VB_STRING_POOL.Get()
+	defer _VB_STRING_POOL.Put(kdm)
+	for _, k := range keys {
+		if k != "" {
+			vb := uint16(b.VBHash(k))
+			a, ok1 := kdm[vb]
+			if !ok1 {
+				a = _STRING_POOL.Get()
+			}
+			kdm[vb] = append(a, k)
+		}
+	}
+
+	// Buffered for two sends: the error list plus the final nil
+	// (see errorCollector).
+	eout := make(chan error, 2)
+	groupErrorStatus := &errorStatus{}
+
+	// processBulkGet will own both of these channels and
+	// guarantee they're closed before it returns.
+	ch := make(chan map[string]*gomemcached.MCResponse)
+	ech := make(chan error)
+
+	go errorCollector(ech, eout, groupErrorStatus)
+	go b.processBulkGet(kdm, reqDeadline, ch, ech, subPaths, groupErrorStatus)
+
+	var rv map[string]*gomemcached.MCResponse
+
+	for m := range ch {
+		// Reuse the first batch map as the accumulator; fold later
+		// batches into it and recycle their maps.
+		if rv == nil {
+			rv = m
+			continue
+		}
+
+		for k, v := range m {
+			rv[k] = v
+		}
+		_STRING_MCRESPONSE_POOL.Put(m)
+	}
+
+	return rv, <-eout
+}
+
+// WriteOptions is the set of option flags availble for the Write
+// method. They are ORed together to specify the desired request.
+type WriteOptions int
+
+const (
+	// Raw specifies that the value is raw []byte or nil; don't
+	// JSON-encode it.
+	Raw = WriteOptions(1 << iota)
+	// AddOnly indicates an item should only be written if it
+	// doesn't exist, otherwise ErrKeyExists is returned.
+	AddOnly
+	// Persist causes the operation to block until the server
+	// confirms the item is persisted.
+	Persist
+	// Indexable causes the operation to block until it's availble via the index.
+	Indexable
+	// Append indicates the given value should be appended to the
+	// existing value for the given key.
+	Append
+)
+
+// optNames maps each WriteOptions flag to its display name for String.
+var optNames = []struct {
+	opt  WriteOptions
+	name string
+}{
+	{Raw, "raw"},
+	{AddOnly, "addonly"}, {Persist, "persist"},
+	{Indexable, "indexable"}, {Append, "append"},
+}
+
+// String representation of WriteOptions
+func (w WriteOptions) String() string {
+	f := []string{}
+	for _, on := range optNames {
+		if w&on.opt != 0 {
+			f = append(f, on.name)
+			w &= ^on.opt // clear named bits; leftovers render as hex below
+		}
+	}
+	// Unknown bits (or no bits at all) are shown in hex.
+	if len(f) == 0 || w != 0 {
+		f = append(f, fmt.Sprintf("0x%x", int(w)))
+	}
+	return strings.Join(f, "|")
+}
+
+// Error returned from Write with AddOnly flag, when key already exists in the bucket.
+var ErrKeyExists = errors.New("key exists")
+
+// General-purpose value setter.
+//
+// The Set, Add and Delete methods are just wrappers around this. The
+// interpretation of `v` depends on whether the `Raw` option is
+// given. If it is, v must be a byte array or nil. (A nil value causes
+// a delete.) If `Raw` is not given, `v` will be marshaled as JSON
+// before being written. It must be JSON-marshalable and it must not
+// be nil.
+func (b *Bucket) Write(k string, flags, exp int, v interface{},
+	opt WriteOptions) (err error) {
+
+	if ClientOpCallback != nil {
+		defer func(t time.Time) {
+			ClientOpCallback(fmt.Sprintf("Write(%v)", opt), k, t, err)
+		}(time.Now())
+	}
+
+	var data []byte
+	if opt&Raw == 0 {
+		data, err = json.Marshal(v)
+		if err != nil {
+			return err
+		}
+	} else if v != nil {
+		data = v.([]byte)
+	}
+
+	var res *gomemcached.MCResponse
+	err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
+		if opt&AddOnly != 0 {
+			// Add reports KEY_EEXISTS in-band; translate to ErrKeyExists.
+			res, err = memcached.UnwrapMemcachedError(
+				mc.Add(vb, k, flags, exp, data))
+			if err == nil && res.Status != gomemcached.SUCCESS {
+				if res.Status == gomemcached.KEY_EEXISTS {
+					err = ErrKeyExists
+				} else {
+					err = res
+				}
+			}
+		} else if opt&Append != 0 {
+			res, err = mc.Append(vb, k, data)
+		} else if data == nil {
+			// nil data means delete (see doc comment above).
+			res, err = mc.Del(vb, k)
+		} else {
+			res, err = mc.Set(vb, k, flags, exp, data)
+		}
+
+		return err
+	})
+
+	if err == nil && (opt&(Persist|Indexable) != 0) {
+		err = b.WaitForPersistence(k, res.Cas, data == nil)
+	}
+
+	return err
+}
+
+// WriteWithMT is like Write but also returns the MutationToken
+// (vbucket id, vbucket UUID, sequence number) that the server reports
+// in the response extras when mutation tokens are enabled.
+func (b *Bucket) WriteWithMT(k string, flags, exp int, v interface{},
+	opt WriteOptions) (mt *MutationToken, err error) {
+
+	if ClientOpCallback != nil {
+		defer func(t time.Time) {
+			ClientOpCallback(fmt.Sprintf("WriteWithMT(%v)", opt), k, t, err)
+		}(time.Now())
+	}
+
+	var data []byte
+	if opt&Raw == 0 {
+		data, err = json.Marshal(v)
+		if err != nil {
+			return nil, err
+		}
+	} else if v != nil {
+		data = v.([]byte)
+	}
+
+	var res *gomemcached.MCResponse
+	err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
+		if opt&AddOnly != 0 {
+			// Add reports KEY_EEXISTS in-band; translate to ErrKeyExists.
+			res, err = memcached.UnwrapMemcachedError(
+				mc.Add(vb, k, flags, exp, data))
+			if err == nil && res.Status != gomemcached.SUCCESS {
+				if res.Status == gomemcached.KEY_EEXISTS {
+					err = ErrKeyExists
+				} else {
+					err = res
+				}
+			}
+		} else if opt&Append != 0 {
+			res, err = mc.Append(vb, k, data)
+		} else if data == nil {
+			res, err = mc.Del(vb, k)
+		} else {
+			res, err = mc.Set(vb, k, flags, exp, data)
+		}
+
+		// Only inspect Extras on success: on transport errors res may be
+		// nil, and the previous unconditional access could panic here.
+		// Extras layout: bytes 0-7 vbucket UUID, 8-15 sequence number.
+		if err == nil && res != nil && len(res.Extras) >= 16 {
+			vbuuid := uint64(binary.BigEndian.Uint64(res.Extras[0:8]))
+			seqNo := uint64(binary.BigEndian.Uint64(res.Extras[8:16]))
+			mt = &MutationToken{VBid: vb, Guard: vbuuid, Value: seqNo}
+		}
+
+		return err
+	})
+
+	if err == nil && (opt&(Persist|Indexable) != 0) {
+		err = b.WaitForPersistence(k, res.Cas, data == nil)
+	}
+
+	return mt, err
+}
+
+// Set a value in this bucket with Cas and return the new Cas value
+func (b *Bucket) Cas(k string, exp int, cas uint64, v interface{}) (uint64, error) {
+	return b.WriteCas(k, 0, exp, cas, v, 0)
+}
+
+// Set a value in this bucket with Cas without json encoding it
+// (v must be []byte or nil; see Write's Raw semantics).
+func (b *Bucket) CasRaw(k string, exp int, cas uint64, v interface{}) (uint64, error) {
+	return b.WriteCas(k, 0, exp, cas, v, Raw)
+}
+
+// WriteCas writes a value only if the stored CAS matches cas, and
+// returns the new CAS value on success. The Raw/Persist/Indexable
+// options behave as in Write. On error the returned CAS is 0.
+func (b *Bucket) WriteCas(k string, flags, exp int, cas uint64, v interface{},
+	opt WriteOptions) (newCas uint64, err error) {
+
+	if ClientOpCallback != nil {
+		defer func(t time.Time) {
+			ClientOpCallback(fmt.Sprintf("Write(%v)", opt), k, t, err)
+		}(time.Now())
+	}
+
+	var data []byte
+	if opt&Raw == 0 {
+		data, err = json.Marshal(v)
+		if err != nil {
+			return 0, err
+		}
+	} else if v != nil {
+		data = v.([]byte)
+	}
+
+	var res *gomemcached.MCResponse
+	err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
+		res, err = mc.SetCas(vb, k, flags, exp, cas, data)
+		return err
+	})
+
+	// Bail out before touching res: on failure it may be nil, and the
+	// old unconditional "return res.Cas, err" could panic.
+	if err != nil {
+		return 0, err
+	}
+
+	if opt&(Persist|Indexable) != 0 {
+		err = b.WaitForPersistence(k, res.Cas, data == nil)
+	}
+
+	return res.Cas, err
+}
+
+// Extended CAS operation. These functions will return the mutation token, i.e vbuuid & guard
+func (b *Bucket) CasWithMeta(k string, flags int, exp int, cas uint64, v interface{}) (uint64, *MutationToken, error) {
+	return b.WriteCasWithMT(k, flags, exp, cas, v, 0)
+}
+
+// CasWithMetaRaw is CasWithMeta without JSON-encoding the value.
+func (b *Bucket) CasWithMetaRaw(k string, flags int, exp int, cas uint64, v interface{}) (uint64, *MutationToken, error) {
+	return b.WriteCasWithMT(k, flags, exp, cas, v, Raw)
+}
+
+// WriteCasWithMT performs a CAS write and returns the new CAS value plus
+// the mutation token, when the server supplies one in the response extras.
+func (b *Bucket) WriteCasWithMT(k string, flags, exp int, cas uint64, v interface{},
+	opt WriteOptions) (newCas uint64, mt *MutationToken, err error) {
+
+	if ClientOpCallback != nil {
+		defer func(t time.Time) {
+			ClientOpCallback(fmt.Sprintf("Write(%v)", opt), k, t, err)
+		}(time.Now())
+	}
+
+	var data []byte
+	if opt&Raw == 0 {
+		data, err = json.Marshal(v)
+		if err != nil {
+			return 0, nil, err
+		}
+	} else if v != nil {
+		data = v.([]byte)
+	}
+
+	var res *gomemcached.MCResponse
+	err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
+		res, err = mc.SetCas(vb, k, flags, exp, cas, data)
+		return err
+	})
+
+	if err != nil {
+		return 0, nil, err
+	}
+
+	// check for extras
+	// Layout: bytes 0-7 vbucket UUID, 8-15 sequence number.
+	if len(res.Extras) >= 16 {
+		vbuuid := uint64(binary.BigEndian.Uint64(res.Extras[0:8]))
+		seqNo := uint64(binary.BigEndian.Uint64(res.Extras[8:16]))
+		vb := b.VBHash(k)
+		mt = &MutationToken{VBid: uint16(vb), Guard: vbuuid, Value: seqNo}
+	}
+
+	// err is nil at this point; the check is kept for symmetry with Write.
+	if err == nil && (opt&(Persist|Indexable) != 0) {
+		err = b.WaitForPersistence(k, res.Cas, data == nil)
+	}
+
+	return res.Cas, mt, err
+}
+
+// Set a value in this bucket.
+// The value will be serialized into a JSON document.
+func (b *Bucket) Set(k string, exp int, v interface{}) error {
+	return b.Write(k, 0, exp, v, 0)
+}
+
+// Set a value in this bucket with with flags
+func (b *Bucket) SetWithMeta(k string, flags int, exp int, v interface{}) (*MutationToken, error) {
+	return b.WriteWithMT(k, flags, exp, v, 0)
+}
+
+// SetRaw sets a value in this bucket without JSON encoding it.
+func (b *Bucket) SetRaw(k string, exp int, v []byte) error {
+	return b.Write(k, 0, exp, v, Raw)
+}
+
+// Add adds a value to this bucket; like Set except that nothing
+// happens if the key exists. The value will be serialized into a
+// JSON document.
+func (b *Bucket) Add(k string, exp int, v interface{}) (added bool, err error) {
+	err = b.Write(k, 0, exp, v, AddOnly)
+	if err == ErrKeyExists {
+		// An existing key is not an error; report added=false instead.
+		return false, nil
+	}
+	return (err == nil), err
+}
+
+// AddRaw adds a value to this bucket; like SetRaw except that nothing
+// happens if the key exists. The value will be stored as raw bytes.
+func (b *Bucket) AddRaw(k string, exp int, v []byte) (added bool, err error) {
+	err = b.Write(k, 0, exp, v, AddOnly|Raw)
+	if err == ErrKeyExists {
+		return false, nil
+	}
+	return (err == nil), err
+}
+
+// AddWithMT is like Add but also returns the mutation token from the
+// underlying write.
+func (b *Bucket) AddWithMT(k string, exp int, v interface{}) (added bool, mt *MutationToken, err error) {
+	mt, err = b.WriteWithMT(k, 0, exp, v, AddOnly)
+	if err == ErrKeyExists {
+		return false, mt, nil
+	}
+	return (err == nil), mt, err
+}
+
+// AddRawWithMT is like AddRaw but also returns the mutation token.
+func (b *Bucket) AddRawWithMT(k string, exp int, v []byte) (added bool, mt *MutationToken, err error) {
+	mt, err = b.WriteWithMT(k, 0, exp, v, AddOnly|Raw)
+	if err == ErrKeyExists {
+		return false, mt, nil
+	}
+	return (err == nil), mt, err
+}
+
+// Append appends raw data to an existing item.
+func (b *Bucket) Append(k string, data []byte) error {
+	return b.Write(k, 0, 0, data, Append|Raw)
+}
+
+// GetsMCFromCollection fetches a single key from the given collection and
+// returns the raw memcached response. An empty key yields (nil, nil).
+func (b *Bucket) GetsMCFromCollection(collUid uint32, key string, reqDeadline time.Time) (*gomemcached.MCResponse, error) {
+	var err error
+	var response *gomemcached.MCResponse
+
+	if key == "" {
+		return nil, nil
+	}
+
+	if ClientOpCallback != nil {
+		defer func(t time.Time) { ClientOpCallback("GetsMCFromCollection", key, t, err) }(time.Now())
+	}
+
+	err = b.Do2(key, func(mc *memcached.Client, vb uint16) error {
+		var err1 error
+
+		mc.SetDeadline(getDeadline(reqDeadline, DefaultTimeout))
+		// Collections require the bucket to be selected on this connection.
+		_, err1 = mc.SelectBucket(b.Name)
+		if err1 != nil {
+			mc.SetDeadline(noDeadline)
+			return err1
+		}
+
+		mc.SetDeadline(getDeadline(reqDeadline, DefaultTimeout))
+		response, err1 = mc.GetFromCollection(vb, collUid, key)
+		if err1 != nil {
+			mc.SetDeadline(noDeadline)
+			return err1
+		}
+
+		return nil
+	}, false)
+
+	return response, err
+}
+
+// Returns collectionUid, manifestUid, error.
+func (b *Bucket) GetCollectionCID(scope string, collection string, reqDeadline time.Time) (uint32, uint32, error) {
+	var err error
+	var response *gomemcached.MCResponse
+
+	if ClientOpCallback != nil {
+		defer func(t time.Time) { ClientOpCallback("GetCollectionCID", scope+"."+collection, t, err) }(time.Now())
+	}
+
+	var key = "DUMMY" // Contact any server.
+	var manifestUid uint32
+	var collUid uint32
+	err = b.Do2(key, func(mc *memcached.Client, vb uint16) error {
+		var err1 error
+
+		mc.SetDeadline(getDeadline(reqDeadline, DefaultTimeout))
+		_, err1 = mc.SelectBucket(b.Name)
+		if err1 != nil {
+			mc.SetDeadline(noDeadline)
+			return err1
+		}
+
+		response, err1 = mc.CollectionsGetCID(scope, collection)
+		if err1 != nil {
+			mc.SetDeadline(noDeadline)
+			return err1
+		}
+
+		// NOTE(review): assumes a successful response always carries at
+		// least 12 extras bytes (manifest uid at [4:8], collection id at
+		// [8:12]); a short reply would panic here — confirm against the
+		// KV protocol spec.
+		manifestUid = binary.BigEndian.Uint32(response.Extras[4:8])
+		collUid = binary.BigEndian.Uint32(response.Extras[8:12])
+
+		return nil
+	}, false)
+
+	return collUid, manifestUid, err
+}
+
+// Get a value straight from Memcached
+func (b *Bucket) GetsMC(key string, reqDeadline time.Time) (*gomemcached.MCResponse, error) {
+	var err error
+	var response *gomemcached.MCResponse
+
+	if key == "" {
+		return nil, nil
+	}
+
+	if ClientOpCallback != nil {
+		defer func(t time.Time) { ClientOpCallback("GetsMC", key, t, err) }(time.Now())
+	}
+
+	err = b.Do2(key, func(mc *memcached.Client, vb uint16) error {
+		var err1 error
+
+		mc.SetDeadline(getDeadline(reqDeadline, DefaultTimeout))
+		response, err1 = mc.Get(vb, key)
+		mc.SetDeadline(noDeadline)
+		if err1 != nil {
+			return err1
+		}
+		return nil
+	}, false)
+	return response, err
+}
+
+// Get a value through the subdoc API
+func (b *Bucket) GetsSubDoc(key string, reqDeadline time.Time, subPaths []string) (*gomemcached.MCResponse, error) {
+	var err error
+	var response *gomemcached.MCResponse
+
+	if key == "" {
+		return nil, nil
+	}
+
+	if ClientOpCallback != nil {
+		defer func(t time.Time) { ClientOpCallback("GetsSubDoc", key, t, err) }(time.Now())
+	}
+
+	err = b.Do2(key, func(mc *memcached.Client, vb uint16) error {
+		var err1 error
+
+		mc.SetDeadline(getDeadline(reqDeadline, DefaultTimeout))
+		response, err1 = mc.GetSubdoc(vb, key, subPaths)
+		mc.SetDeadline(noDeadline)
+		if err1 != nil {
+			return err1
+		}
+		return nil
+	}, false)
+	return response, err
+}
+
+// GetsRaw gets a raw value from this bucket including its CAS
+// counter and flags.
+func (b *Bucket) GetsRaw(k string) (data []byte, flags int,
+	cas uint64, err error) {
+
+	if ClientOpCallback != nil {
+		defer func(t time.Time) { ClientOpCallback("GetsRaw", k, t, err) }(time.Now())
+	}
+
+	err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
+		res, err := mc.Get(vb, k)
+		if err != nil {
+			return err
+		}
+		cas = res.Cas
+		// The first four extras bytes carry the item flags when present.
+		if len(res.Extras) >= 4 {
+			flags = int(binary.BigEndian.Uint32(res.Extras))
+		}
+		data = res.Body
+		return nil
+	})
+	return
+}
+
+// Gets gets a value from this bucket, including its CAS counter. The
+// value is expected to be a JSON stream and will be deserialized into
+// rv. caso may be nil if the CAS value is not wanted.
+func (b *Bucket) Gets(k string, rv interface{}, caso *uint64) error {
+	data, _, cas, err := b.GetsRaw(k)
+	if err != nil {
+		return err
+	}
+	if caso != nil {
+		*caso = cas
+	}
+	return json.Unmarshal(data, rv)
+}
+
+// Get a value from this bucket.
+// The value is expected to be a JSON stream and will be deserialized
+// into rv.
+func (b *Bucket) Get(k string, rv interface{}) error {
+	return b.Gets(k, rv, nil)
+}
+
+// GetRaw gets a raw value from this bucket. No marshaling is performed.
+func (b *Bucket) GetRaw(k string) ([]byte, error) {
+	d, _, _, err := b.GetsRaw(k)
+	return d, err
+}
+
+// GetAndTouchRaw gets a raw value from this bucket including its CAS
+// counter and flags, and updates the expiry on the doc.
+func (b *Bucket) GetAndTouchRaw(k string, exp int) (data []byte,
+	cas uint64, err error) {
+
+	if ClientOpCallback != nil {
+		// Report the correct operation name (was copy-pasted as "GetsRaw").
+		defer func(t time.Time) { ClientOpCallback("GetAndTouchRaw", k, t, err) }(time.Now())
+	}
+
+	err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
+		res, err := mc.GetAndTouch(vb, k, exp)
+		if err != nil {
+			return err
+		}
+		cas = res.Cas
+		data = res.Body
+		return nil
+	})
+	return data, cas, err
+}
+
+// GetMeta returns the meta values for a key
+// cas is dereferenced unconditionally and must be non-nil; flags,
+// expiry and seqNo are written only when the response extras are long
+// enough, but must also be non-nil.
+func (b *Bucket) GetMeta(k string, flags *int, expiry *int, cas *uint64, seqNo *uint64) (err error) {
+
+	if ClientOpCallback != nil {
+		defer func(t time.Time) { ClientOpCallback("GetsMeta", k, t, err) }(time.Now())
+	}
+
+	err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
+		res, err := mc.GetMeta(vb, k)
+		if err != nil {
+			return err
+		}
+
+		*cas = res.Cas
+		// Extras layout (when present): flags at [4:8], expiry at
+		// [8:12], sequence number at [12:20].
+		if len(res.Extras) >= 8 {
+			*flags = int(binary.BigEndian.Uint32(res.Extras[4:]))
+		}
+
+		if len(res.Extras) >= 12 {
+			*expiry = int(binary.BigEndian.Uint32(res.Extras[8:]))
+		}
+
+		if len(res.Extras) >= 20 {
+			*seqNo = uint64(binary.BigEndian.Uint64(res.Extras[12:]))
+		}
+
+		return nil
+	})
+
+	return err
+}
+
+// Delete a key from this bucket.
+func (b *Bucket) Delete(k string) error {
+	// Raw with a nil value selects the delete path in Write.
+	return b.Write(k, 0, 0, nil, Raw)
+}
+
+// Incr increments the value at a given key by amt and defaults to def if no value present.
+func (b *Bucket) Incr(k string, amt, def uint64, exp int) (val uint64, err error) {
+	if ClientOpCallback != nil {
+		defer func(t time.Time) { ClientOpCallback("Incr", k, t, err) }(time.Now())
+	}
+
+	var rv uint64
+	err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
+		res, err := mc.Incr(vb, k, amt, def, exp)
+		if err != nil {
+			return err
+		}
+		rv = res
+		return nil
+	})
+	return rv, err
+}
+
+// Decr decrements the value at a given key by amt and defaults to def if no value present
+func (b *Bucket) Decr(k string, amt, def uint64, exp int) (val uint64, err error) {
+	if ClientOpCallback != nil {
+		defer func(t time.Time) { ClientOpCallback("Decr", k, t, err) }(time.Now())
+	}
+
+	var rv uint64
+	err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
+		res, err := mc.Decr(vb, k, amt, def, exp)
+		if err != nil {
+			return err
+		}
+		rv = res
+		return nil
+	})
+	return rv, err
+}
+
+// Wrapper around memcached.CASNext()
+// Reports whether the CAS loop should continue; state carries the
+// current value, CAS and any terminal error.
+func (b *Bucket) casNext(k string, exp int, state *memcached.CASState) bool {
+	if ClientOpCallback != nil {
+		defer func(t time.Time) {
+			ClientOpCallback("casNext", k, t, state.Err)
+		}(time.Now())
+	}
+
+	keepGoing := false
+	state.Err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
+		keepGoing = mc.CASNext(vb, k, exp, state)
+		return state.Err
+	})
+	return keepGoing && state.Err == nil
+}
+
+// An UpdateFunc is a callback function to update a document
+type UpdateFunc func(current []byte) (updated []byte, err error)
+
+// Return this as the error from an UpdateFunc to cancel the Update
+// operation.
+const UpdateCancel = memcached.CASQuit
+
+// Update performs a Safe update of a document, avoiding conflicts by
+// using CAS.
+//
+// The callback function will be invoked with the current raw document
+// contents (or nil if the document doesn't exist); it should return
+// the updated raw contents (or nil to delete.) If it decides not to
+// change anything it can return UpdateCancel as the error.
+//
+// If another writer modifies the document between the get and the
+// set, the callback will be invoked again with the newer value.
+func (b *Bucket) Update(k string, exp int, callback UpdateFunc) error {
+	_, err := b.update(k, exp, callback)
+	return err
+}
+
+// internal version of Update that returns a CAS value
+func (b *Bucket) update(k string, exp int, callback UpdateFunc) (newCas uint64, err error) {
+	var state memcached.CASState
+	for b.casNext(k, exp, &state) {
+		var err error
+		if state.Value, err = callback(state.Value); err != nil {
+			return 0, err
+		}
+	}
+	return state.Cas, state.Err
+}
+
+// A WriteUpdateFunc is a callback function to update a document
+type WriteUpdateFunc func(current []byte) (updated []byte, opt WriteOptions, err error)
+
+// WriteUpdate performs a Safe update of a document, avoiding
+// conflicts by using CAS. WriteUpdate is like Update, except that
+// the callback can return a set of WriteOptions, of which Persist and
+// Indexable are recognized: these cause the call to wait until the
+// document update has been persisted to disk and/or become available
+// to index.
+func (b *Bucket) WriteUpdate(k string, exp int, callback WriteUpdateFunc) error {
+	var writeOpts WriteOptions
+	var deletion bool
+	// Wrap the callback in an UpdateFunc we can pass to Update:
+	updateCallback := func(current []byte) (updated []byte, err error) {
+		update, opt, err := callback(current)
+		writeOpts = opt
+		deletion = (update == nil)
+		return update, err
+	}
+	cas, err := b.update(k, exp, updateCallback)
+	if err != nil {
+		return err
+	}
+	// If callback asked, wait for persistence or indexability:
+	if writeOpts&(Persist|Indexable) != 0 {
+		err = b.WaitForPersistence(k, cas, deletion)
+	}
+	return err
+}
+
+// Observe observes the current state of a document.
+func (b *Bucket) Observe(k string) (result memcached.ObserveResult, err error) {
+	if ClientOpCallback != nil {
+		defer func(t time.Time) { ClientOpCallback("Observe", k, t, err) }(time.Now())
+	}
+
+	err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
+		result, err = mc.Observe(vb, k)
+		return err
+	})
+	return
+}
+
+// Returned from WaitForPersistence (or Write, if the Persistent or Indexable flag is used)
+// if the value has been overwritten by another before being persisted.
+var ErrOverwritten = errors.New("overwritten")
+
+// Returned from WaitForPersistence (or Write, if the Persistent or Indexable flag is used)
+// if the value hasn't been persisted by the timeout interval
+var ErrTimeout = errors.New("timeout")
+
+// WaitForPersistence waits for an item to be considered durable.
+//
+// Besides transport errors, ErrOverwritten may be returned if the
+// item is overwritten before it reaches durability. ErrTimeout may
+// occur if the item isn't found durable in a reasonable amount of
+// time.
+func (b *Bucket) WaitForPersistence(k string, cas uint64, deletion bool) error {
+	timeout := 10 * time.Second
+	sleepDelay := 5 * time.Millisecond
+	start := time.Now()
+	// Poll with exponential backoff until persisted, overwritten,
+	// or timed out.
+	for {
+		time.Sleep(sleepDelay)
+		sleepDelay += sleepDelay / 2 // multiply delay by 1.5 every time
+
+		result, err := b.Observe(k)
+		if err != nil {
+			return err
+		}
+		if persisted, overwritten := result.CheckPersistence(cas, deletion); overwritten {
+			return ErrOverwritten
+		} else if persisted {
+			return nil
+		}
+
+		// Let the server's persistence-time estimate stretch the timeout.
+		if result.PersistenceTime > 0 {
+			timeout = 2 * result.PersistenceTime
+		}
+		if time.Since(start) >= timeout-sleepDelay {
+			return ErrTimeout
+		}
+	}
+}
+
+// Shared pool of bulk-get response maps (see GetBulk/getBulk).
+var _STRING_MCRESPONSE_POOL = gomemcached.NewStringMCResponsePool(16)
+
+// stringPool recycles []string scratch slices of roughly a fixed capacity.
+type stringPool struct {
+	pool *sync.Pool
+	size int
+}
+
+// newStringPool builds a stringPool whose slices start with capacity size.
+func newStringPool(size int) *stringPool {
+	rv := &stringPool{
+		pool: &sync.Pool{
+			New: func() interface{} {
+				return make([]string, 0, size)
+			},
+		},
+		size: size,
+	}
+
+	return rv
+}
+
+// Get returns an empty []string from the pool (length 0).
+func (this *stringPool) Get() []string {
+	return this.pool.Get().([]string)
+}
+
+// Put recycles s. Slices that shrank below the nominal size or grew
+// beyond twice it are dropped to keep pooled capacities predictable.
+func (this *stringPool) Put(s []string) {
+	if s == nil || cap(s) < this.size || cap(s) > 2*this.size {
+		return
+	}
+
+	this.pool.Put(s[0:0])
+}
+
+var _STRING_POOL = newStringPool(16)
+
+// vbStringPool recycles the vbucket->keys maps used by getBulk; the
+// contained []string values go back to strPool on Put.
+type vbStringPool struct {
+	pool    *sync.Pool
+	strPool *stringPool
+}
+
+// newVBStringPool builds a vbStringPool with maps pre-sized to size.
+func newVBStringPool(size int, sp *stringPool) *vbStringPool {
+	rv := &vbStringPool{
+		pool: &sync.Pool{
+			New: func() interface{} {
+				return make(map[uint16][]string, size)
+			},
+		},
+		strPool: sp,
+	}
+
+	return rv
+}
+
+// Get returns an empty map from the pool.
+func (this *vbStringPool) Get() map[uint16][]string {
+	return this.pool.Get().(map[uint16][]string)
+}
+
+// Put empties s, recycling each value slice, then pools the map itself.
+func (this *vbStringPool) Put(s map[uint16][]string) {
+	if s == nil {
+		return
+	}
+
+	for k, v := range s {
+		delete(s, k)
+		this.strPool.Put(v)
+	}
+
+	this.pool.Put(s)
+}
+
+var _VB_STRING_POOL = newVBStringPool(16, _STRING_POOL)
diff --git a/vendor/github.com/couchbase/go-couchbase/conn_pool.go b/vendor/github.com/couchbase/go-couchbase/conn_pool.go
new file mode 100644
index 0000000..e1ee6c9
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/conn_pool.go
@@ -0,0 +1,415 @@
+package couchbase
+
+import (
+ "crypto/tls"
+ "errors"
+ "sync/atomic"
+ "time"
+
+ "github.com/couchbase/gomemcached"
+ "github.com/couchbase/gomemcached/client"
+ "github.com/couchbase/goutils/logging"
+)
+
+// GenericMcdAuthHandler is a kind of AuthHandler that performs
+// special auth exchange (like non-standard auth, possibly followed by
+// select-bucket).
+type GenericMcdAuthHandler interface {
+	AuthHandler
+	AuthenticateMemcachedConn(host string, conn *memcached.Client) error
+}
+
+// Error raised when a connection can't be retrieved from a pool.
+var TimeoutError = errors.New("timeout waiting to build connection")
+var errClosedPool = errors.New("the connection pool is closed")
+var errNoPool = errors.New("no connection pool")
+
+// Default timeout for retrieving a connection from the pool.
+var ConnPoolTimeout = time.Hour * 24 * 30
+
+// overflow connection closer cycle time
+var ConnCloserInterval = time.Second * 30
+
+// ConnPoolAvailWaitTime is the amount of time to wait for an existing
+// connection from the pool before considering the creation of a new
+// one.
+var ConnPoolAvailWaitTime = time.Millisecond
+
+// connectionPool hands out memcached connections for one node;
+// createsem bounds the total number of live connections (pool plus
+// overflow) while connections holds the idle ones.
+type connectionPool struct {
+	host        string
+	mkConn      func(host string, ah AuthHandler, tlsConfig *tls.Config, bucketName string) (*memcached.Client, error)
+	auth        AuthHandler
+	connections chan *memcached.Client
+	createsem   chan bool
+	bailOut     chan bool
+	poolSize    int
+	connCount   uint64
+	inUse       bool
+	tlsConfig   *tls.Config
+	bucket      string
+}
+
+// newConnectionPool builds a pool for host. When closer is true a
+// background goroutine (connCloser) trims overflow connections and the
+// idle channel is sized to hold them.
+func newConnectionPool(host string, ah AuthHandler, closer bool, poolSize, poolOverflow int, tlsConfig *tls.Config, bucket string) *connectionPool {
+	connSize := poolSize
+	if closer {
+		connSize += poolOverflow
+	}
+	rv := &connectionPool{
+		host:        host,
+		connections: make(chan *memcached.Client, connSize),
+		createsem:   make(chan bool, poolSize+poolOverflow),
+		mkConn:      defaultMkConn,
+		auth:        ah,
+		poolSize:    poolSize,
+		tlsConfig:   tlsConfig,
+		bucket:      bucket,
+	}
+	if closer {
+		rv.bailOut = make(chan bool, 1)
+		go rv.connCloser()
+	}
+	return rv
+}
+
+// ConnPoolCallback is notified whenever connections are acquired from a pool.
+var ConnPoolCallback func(host string, source string, start time.Time, err error)
+
+// Use regular in-the-clear connection if tlsConfig is nil.
+// Use secure connection (TLS) if tlsConfig is set.
+func defaultMkConn(host string, ah AuthHandler, tlsConfig *tls.Config, bucketName string) (*memcached.Client, error) {
+	var features memcached.Features
+
+	var conn *memcached.Client
+	var err error
+	if tlsConfig == nil {
+		conn, err = memcached.Connect("tcp", host)
+	} else {
+		conn, err = memcached.ConnectTLS("tcp", host, tlsConfig)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	// Apply a deadline for the handshake; cleared again before returning.
+	if DefaultTimeout > 0 {
+		conn.SetDeadline(getDeadline(noDeadline, DefaultTimeout))
+	}
+
+	if TCPKeepalive == true {
+		conn.SetKeepAliveOptions(time.Duration(TCPKeepaliveInterval) * time.Second)
+	}
+
+	if EnableMutationToken == true {
+		features = append(features, memcached.FeatureMutationToken)
+	}
+	if EnableDataType == true {
+		features = append(features, memcached.FeatureDataType)
+	}
+
+	if EnableXattr == true {
+		features = append(features, memcached.FeatureXattr)
+	}
+
+	if EnableCollections {
+		features = append(features, memcached.FeatureCollections)
+	}
+
+	if len(features) > 0 {
+		res, err := conn.EnableFeatures(features)
+		// Timeouts are fatal; other feature failures are only logged.
+		if err != nil && isTimeoutError(err) {
+			conn.Close()
+			return nil, err
+		}
+
+		if err != nil || res.Status != gomemcached.SUCCESS {
+			logging.Warnf("Unable to enable features %v", err)
+		}
+	}
+
+	if gah, ok := ah.(GenericMcdAuthHandler); ok {
+		// Custom auth exchange replaces the name/password flow below.
+		err = gah.AuthenticateMemcachedConn(host, conn)
+		if err != nil {
+			conn.Close()
+			return nil, err
+		}
+
+		if DefaultTimeout > 0 {
+			conn.SetDeadline(noDeadline)
+		}
+
+		return conn, nil
+	}
+	name, pass, bucket := ah.GetCredentials()
+	if bucket == "" {
+		// Authenticator does not know specific bucket.
+		bucket = bucketName
+	}
+
+	if name != "default" {
+		_, err = conn.Auth(name, pass)
+		if err != nil {
+			conn.Close()
+			return nil, err
+		}
+		// Select bucket (Required for cb_auth creds)
+		// Required when doing auth with _admin credentials
+		if bucket != "" && bucket != name {
+			_, err = conn.SelectBucket(bucket)
+			if err != nil {
+				conn.Close()
+				return nil, err
+			}
+		}
+	}
+
+	if DefaultTimeout > 0 {
+		conn.SetDeadline(noDeadline)
+	}
+
+	return conn, nil
+}
+
+// Close shuts the pool down and closes every idle connection. The
+// recover absorbs a double-close of cp.connections.
+func (cp *connectionPool) Close() (err error) {
+	defer func() {
+		if recover() != nil {
+			err = errors.New("connectionPool.Close error")
+		}
+	}()
+	if cp.bailOut != nil {
+
+		// defensively, we won't wait if the channel is full
+		select {
+		case cp.bailOut <- false:
+		default:
+		}
+	}
+	close(cp.connections)
+	for c := range cp.connections {
+		c.Close()
+	}
+	return
+}
+
+// Node returns the host this pool connects to.
+func (cp *connectionPool) Node() string {
+	return cp.host
+}
+
+// GetWithTimeout returns a pooled connection, waiting up to d to either
+// receive an idle one or win the right to create a new one.
+func (cp *connectionPool) GetWithTimeout(d time.Duration) (rv *memcached.Client, err error) {
+	if cp == nil {
+		return nil, errNoPool
+	}
+
+	// path records which branch satisfied the request, for ConnPoolCallback.
+	path := ""
+
+	if ConnPoolCallback != nil {
+		defer func(path *string, start time.Time) {
+			ConnPoolCallback(cp.host, *path, start, err)
+		}(&path, time.Now())
+	}
+
+	path = "short-circuit"
+
+	// short-circuit available connetions.
+	select {
+	case rv, isopen := <-cp.connections:
+		if !isopen {
+			return nil, errClosedPool
+		}
+		atomic.AddUint64(&cp.connCount, 1)
+		return rv, nil
+	default:
+	}
+
+	t := time.NewTimer(ConnPoolAvailWaitTime)
+	defer t.Stop()
+
+	// Try to grab an available connection within 1ms
+	select {
+	case rv, isopen := <-cp.connections:
+		path = "avail1"
+		if !isopen {
+			return nil, errClosedPool
+		}
+		atomic.AddUint64(&cp.connCount, 1)
+		return rv, nil
+	case <-t.C:
+		// No connection came around in time, let's see
+		// whether we can get one or build a new one first.
+		t.Reset(d) // Reuse the timer for the full timeout.
+		select {
+		case rv, isopen := <-cp.connections:
+			path = "avail2"
+			if !isopen {
+				return nil, errClosedPool
+			}
+			atomic.AddUint64(&cp.connCount, 1)
+			return rv, nil
+		case cp.createsem <- true:
+			path = "create"
+			// Build a connection if we can't get a real one.
+			// This can potentially be an overflow connection, or
+			// a pooled connection.
+			rv, err := cp.mkConn(cp.host, cp.auth, cp.tlsConfig, cp.bucket)
+			if err != nil {
+				// On error, release our create hold
+				<-cp.createsem
+			} else {
+				atomic.AddUint64(&cp.connCount, 1)
+			}
+			return rv, err
+		case <-t.C:
+			return nil, ErrTimeout
+		}
+	}
+}
+
+// Get returns a connection using the default pool timeout.
+func (cp *connectionPool) Get() (*memcached.Client, error) {
+	return cp.GetWithTimeout(ConnPoolTimeout)
+}
+
+// Return gives a connection back to the pool. Unhealthy connections,
+// and overflow beyond the idle channel's capacity, are closed instead
+// of pooled (their create slot is released).
+func (cp *connectionPool) Return(c *memcached.Client) {
+	if c == nil {
+		return
+	}
+
+	if cp == nil {
+		// No pool to return to; just close the connection. The missing
+		// return here previously let execution fall through onto nil
+		// channels below, blocking the caller forever.
+		c.Close()
+		return
+	}
+
+	if c.IsHealthy() {
+		defer func() {
+			if recover() != nil {
+				// This happens when the pool has already been
+				// closed and we're trying to return a
+				// connection to it anyway. Just close the
+				// connection.
+				c.Close()
+			}
+		}()
+
+		select {
+		case cp.connections <- c:
+		default:
+			// Idle channel is full; release the create slot and discard.
+			<-cp.createsem
+			c.Close()
+		}
+	} else {
+		<-cp.createsem
+		c.Close()
+	}
+}
+
+// give the ability to discard a connection from a pool
+// useful for ditching connections to the wrong node after a rebalance
+func (cp *connectionPool) Discard(c *memcached.Client) {
+	<-cp.createsem
+	c.Close()
+}
+
+// asynchronous connection closer
+// Runs while the pool exists; every ConnCloserInterval it closes idle
+// connections beyond poolSize unless demand has been sustained.
+func (cp *connectionPool) connCloser() {
+	var connCount uint64
+
+	t := time.NewTimer(ConnCloserInterval)
+	defer t.Stop()
+
+	for {
+		connCount = cp.connCount
+
+		// we don't exist anymore! bail out!
+		select {
+		case <-cp.bailOut:
+			return
+		case <-t.C:
+		}
+		t.Reset(ConnCloserInterval)
+
+		// no overflow connections open or sustained requests for connections
+		// nothing to do until the next cycle
+		if len(cp.connections) <= cp.poolSize ||
+			ConnCloserInterval/ConnPoolAvailWaitTime < time.Duration(cp.connCount-connCount) {
+			continue
+		}
+
+		// close overflow connections now that they are not needed
+		for c := range cp.connections {
+			select {
+			case <-cp.bailOut:
+				return
+			default:
+			}
+
+			// bail out if close did not work out
+			if !cp.connCleanup(c) {
+				return
+			}
+			if len(cp.connections) <= cp.poolSize {
+				break
+			}
+		}
+	}
+}
+
+// close connection with recovery on error
+func (cp *connectionPool) connCleanup(c *memcached.Client) (rv bool) {
+
+	// just in case we are closing a connection after
+	// bailOut has been sent but we haven't yet read it
+	defer func() {
+		if recover() != nil {
+			rv = false
+		}
+	}()
+	rv = true
+
+	c.Close()
+	<-cp.createsem // release the connection's create slot
+	return
+}
+
+// StartTapFeed opens a TAP feed on a fresh connection from this pool.
+func (cp *connectionPool) StartTapFeed(args *memcached.TapArguments) (*memcached.TapFeed, error) {
+	if cp == nil {
+		return nil, errNoPool
+	}
+	mc, err := cp.Get()
+	if err != nil {
+		return nil, err
+	}
+
+	// A connection can't be used after TAP; Dont' count it against the
+	// connection pool capacity
+	<-cp.createsem
+
+	return mc.StartTapFeed(*args)
+}
+
+const DEFAULT_WINDOW_SIZE = 20 * 1024 * 1024 // 20 Mb
+
+// StartUprFeed opens a UPR/DCP feed on a fresh connection from this pool.
+func (cp *connectionPool) StartUprFeed(name string, sequence uint32, dcp_buffer_size uint32, data_chan_size int) (*memcached.UprFeed, error) {
+	if cp == nil {
+		return nil, errNoPool
+	}
+	mc, err := cp.Get()
+	if err != nil {
+		return nil, err
+	}
+
+	// A connection can't be used after it has been allocated to UPR;
+	// Dont' count it against the connection pool capacity
+	<-cp.createsem
+
+	uf, err := mc.NewUprFeed()
+	if err != nil {
+		return nil, err
+	}
+
+	if err := uf.UprOpen(name, sequence, dcp_buffer_size); err != nil {
+		return nil, err
+	}
+
+	if err := uf.StartFeedWithConfig(data_chan_size); err != nil {
+		return nil, err
+	}
+
+	return uf, nil
+}
diff --git a/vendor/github.com/couchbase/go-couchbase/ddocs.go b/vendor/github.com/couchbase/go-couchbase/ddocs.go
new file mode 100644
index 0000000..f9cc343
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/ddocs.go
@@ -0,0 +1,288 @@
+package couchbase
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "github.com/couchbase/goutils/logging"
+ "io/ioutil"
+ "net/http"
+)
+
+// ViewDefinition represents a single view within a design document.
+type ViewDefinition struct {
+ Map string `json:"map"`
+ Reduce string `json:"reduce,omitempty"`
+}
+
+// DDoc is the document body of a design document specifying a view.
+type DDoc struct {
+ Language string `json:"language,omitempty"`
+ Views map[string]ViewDefinition `json:"views"`
+}
+
+// DDocsResult represents the result from listing the design
+// documents.
+type DDocsResult struct {
+ Rows []struct {
+ DDoc struct {
+ Meta map[string]interface{}
+ JSON DDoc
+ } `json:"doc"`
+ } `json:"rows"`
+}
+
+// GetDDocs lists all design documents of this bucket, via the
+// pool-level REST client. Returns an empty result for buckets with no
+// ddocs URI (ephemeral buckets).
+func (b *Bucket) GetDDocs() (DDocsResult, error) {
+	var ddocsResult DDocsResult
+	b.RLock()
+	pool := b.pool
+	uri := b.DDocs.URI
+	b.RUnlock()
+
+	// MB-23555 ephemeral buckets have no ddocs
+	if uri == "" {
+		return DDocsResult{}, nil
+	}
+
+	err := pool.client.parseURLResponse(uri, &ddocsResult)
+	if err != nil {
+		return DDocsResult{}, err
+	}
+	return ddocsResult, nil
+}
+
+// GetDDocWithRetry fetches the named design document into `into`.
+// NOTE(review): despite the name, no retry loop is visible here; any
+// retrying presumably happens inside parseAPIResponse — confirm.
+func (b *Bucket) GetDDocWithRetry(docname string, into interface{}) error {
+	ddocURI := fmt.Sprintf("/%s/_design/%s", b.GetName(), docname)
+	err := b.parseAPIResponse(ddocURI, &into)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// GetDDocsWithRetry lists all design documents, using the
+// bucket-level parseURLResponse (unlike GetDDocs, which goes through
+// the pool client). NOTE(review): no explicit retry loop is visible;
+// retries are presumably handled by parseURLResponse — confirm.
+func (b *Bucket) GetDDocsWithRetry() (DDocsResult, error) {
+	var ddocsResult DDocsResult
+	b.RLock()
+	uri := b.DDocs.URI
+	b.RUnlock()
+
+	// MB-23555 ephemeral buckets have no ddocs
+	if uri == "" {
+		return DDocsResult{}, nil
+	}
+
+	err := b.parseURLResponse(uri, &ddocsResult)
+	if err != nil {
+		return DDocsResult{}, err
+	}
+	return ddocsResult, nil
+}
+
+func (b *Bucket) ddocURL(docname string) (string, error) {
+ u, err := b.randomBaseURL()
+ if err != nil {
+ return "", err
+ }
+ u.Path = fmt.Sprintf("/%s/_design/%s", b.GetName(), docname)
+ return u.String(), nil
+}
+
+// ddocURLNext builds the design-document URL for docname on the next
+// usable node after nodeId, returning the URL and the id of the node
+// actually selected (so callers can round-robin across nodes).
+func (b *Bucket) ddocURLNext(nodeId int, docname string) (string, int, error) {
+	u, selected, err := b.randomNextURL(nodeId)
+	if err != nil {
+		return "", -1, err
+	}
+	u.Path = fmt.Sprintf("/%s/_design/%s", b.GetName(), docname)
+	return u.String(), selected, nil
+}
+
+// Bounds for the per-operation retry count used by the design-document
+// methods below.
+const ABS_MAX_RETRIES = 10
+const ABS_MIN_RETRIES = 3
+
+// getMaxRetries returns the number of attempts to make: the current
+// node count, clamped to [ABS_MIN_RETRIES, ABS_MAX_RETRIES]. Errors if
+// the bucket currently has no nodes.
+func (b *Bucket) getMaxRetries() (int, error) {
+
+	maxRetries := len(b.Nodes())
+
+	if maxRetries == 0 {
+		return 0, fmt.Errorf("No available Couch rest URLs")
+	}
+
+	if maxRetries > ABS_MAX_RETRIES {
+		maxRetries = ABS_MAX_RETRIES
+	} else if maxRetries < ABS_MIN_RETRIES {
+		maxRetries = ABS_MIN_RETRIES
+	}
+
+	return maxRetries, nil
+}
+
+// PutDDoc installs a design document, trying successive nodes (up to
+// getMaxRetries attempts) whenever the server answers anything but
+// 201 Created. Returns nil on success, or the last HTTP error if every
+// attempt fails.
+func (b *Bucket) PutDDoc(docname string, value interface{}) error {
+
+	var Err error
+
+	maxRetries, err := b.getMaxRetries()
+	if err != nil {
+		return err
+	}
+
+	lastNode := START_NODE_ID
+
+	for retryCount := 0; retryCount < maxRetries; retryCount++ {
+
+		Err = nil
+
+		ddocU, selectedNode, err := b.ddocURLNext(lastNode, docname)
+		if err != nil {
+			return err
+		}
+
+		lastNode = selectedNode
+
+		logging.Infof(" Trying with selected node %d", selectedNode)
+		j, err := json.Marshal(value)
+		if err != nil {
+			return err
+		}
+
+		req, err := http.NewRequest("PUT", ddocU, bytes.NewReader(j))
+		if err != nil {
+			return err
+		}
+		req.Header.Set("Content-Type", "application/json")
+		err = maybeAddAuth(req, b.authHandler(false /* bucket not yet locked */))
+		if err != nil {
+			return err
+		}
+
+		res, err := doHTTPRequest(req)
+		if err != nil {
+			return err
+		}
+
+		// anything but 201 Created counts as failure: remember the
+		// error, refresh the bucket topology and retry on another node
+		if res.StatusCode != 201 {
+			body, _ := ioutil.ReadAll(res.Body)
+			Err = fmt.Errorf("error installing view: %v / %s",
+				res.Status, body)
+			logging.Errorf(" Error in PutDDOC %v. Retrying...", Err)
+			res.Body.Close()
+			b.Refresh()
+			continue
+		}
+
+		res.Body.Close()
+		break
+	}
+
+	return Err
+}
+
+// GetDDoc retrieves a specific design doc and JSON-decodes it into
+// `into`, trying successive nodes (up to getMaxRetries attempts)
+// whenever the server answers anything but 200 OK.
+func (b *Bucket) GetDDoc(docname string, into interface{}) error {
+	var Err error
+	var res *http.Response
+
+	maxRetries, err := b.getMaxRetries()
+	if err != nil {
+		return err
+	}
+
+	lastNode := START_NODE_ID
+	for retryCount := 0; retryCount < maxRetries; retryCount++ {
+
+		Err = nil
+		ddocU, selectedNode, err := b.ddocURLNext(lastNode, docname)
+		if err != nil {
+			return err
+		}
+
+		lastNode = selectedNode
+		logging.Infof(" Trying with selected node %d", selectedNode)
+
+		req, err := http.NewRequest("GET", ddocU, nil)
+		if err != nil {
+			return err
+		}
+		req.Header.Set("Content-Type", "application/json")
+		err = maybeAddAuth(req, b.authHandler(false /* bucket not yet locked */))
+		if err != nil {
+			return err
+		}
+
+		res, err = doHTTPRequest(req)
+		if err != nil {
+			return err
+		}
+		if res.StatusCode != 200 {
+			body, _ := ioutil.ReadAll(res.Body)
+			Err = fmt.Errorf("error reading view: %v / %s",
+				res.Status, body)
+			logging.Errorf(" Error in GetDDOC %v Retrying...", Err)
+			b.Refresh()
+			res.Body.Close()
+			continue
+		}
+		// success: keep the body open for decoding below; the defer
+		// closes it only when GetDDoc returns
+		defer res.Body.Close()
+		break
+	}
+
+	if Err != nil {
+		return Err
+	}
+
+	d := json.NewDecoder(res.Body)
+	return d.Decode(into)
+}
+
+// DeleteDDoc removes a design document, trying successive nodes (up
+// to getMaxRetries attempts) whenever the server answers anything but
+// 200 OK. Returns nil on success, or the last HTTP error otherwise.
+func (b *Bucket) DeleteDDoc(docname string) error {
+
+	var Err error
+
+	maxRetries, err := b.getMaxRetries()
+	if err != nil {
+		return err
+	}
+
+	lastNode := START_NODE_ID
+
+	for retryCount := 0; retryCount < maxRetries; retryCount++ {
+
+		Err = nil
+		ddocU, selectedNode, err := b.ddocURLNext(lastNode, docname)
+		if err != nil {
+			return err
+		}
+
+		lastNode = selectedNode
+		logging.Infof(" Trying with selected node %d", selectedNode)
+
+		req, err := http.NewRequest("DELETE", ddocU, nil)
+		if err != nil {
+			return err
+		}
+		req.Header.Set("Content-Type", "application/json")
+		err = maybeAddAuth(req, b.authHandler(false /* bucket not already locked */))
+		if err != nil {
+			return err
+		}
+
+		res, err := doHTTPRequest(req)
+		if err != nil {
+			return err
+		}
+		if res.StatusCode != 200 {
+			body, _ := ioutil.ReadAll(res.Body)
+			Err = fmt.Errorf("error deleting view : %v / %s", res.Status, body)
+			logging.Errorf(" Error in DeleteDDOC %v. Retrying ... ", Err)
+			b.Refresh()
+			res.Body.Close()
+			continue
+		}
+
+		res.Body.Close()
+		break
+	}
+	return Err
+}
diff --git a/vendor/github.com/couchbase/go-couchbase/observe.go b/vendor/github.com/couchbase/go-couchbase/observe.go
new file mode 100644
index 0000000..6e746f5
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/observe.go
@@ -0,0 +1,300 @@
+package couchbase
+
+import (
+ "fmt"
+ "github.com/couchbase/goutils/logging"
+ "sync"
+)
+
+type PersistTo uint8
+
+const (
+ PersistNone = PersistTo(0x00)
+ PersistMaster = PersistTo(0x01)
+ PersistOne = PersistTo(0x02)
+ PersistTwo = PersistTo(0x03)
+ PersistThree = PersistTo(0x04)
+ PersistFour = PersistTo(0x05)
+)
+
+type ObserveTo uint8
+
+const (
+ ObserveNone = ObserveTo(0x00)
+ ObserveReplicateOne = ObserveTo(0x01)
+ ObserveReplicateTwo = ObserveTo(0x02)
+ ObserveReplicateThree = ObserveTo(0x03)
+ ObserveReplicateFour = ObserveTo(0x04)
+)
+
+type JobType uint8
+
+const (
+ OBSERVE = JobType(0x00)
+ PERSIST = JobType(0x01)
+)
+
+type ObservePersistJob struct {
+ vb uint16
+ vbuuid uint64
+ hostname string
+ jobType JobType
+ failover uint8
+ lastPersistedSeqNo uint64
+ currentSeqNo uint64
+ resultChan chan *ObservePersistJob
+ errorChan chan *OPErrResponse
+}
+
+type OPErrResponse struct {
+ vb uint16
+ vbuuid uint64
+ err error
+ job *ObservePersistJob
+}
+
+var ObservePersistPool = NewPool(1024)
+var OPJobChan = make(chan *ObservePersistJob, 1024)
+var OPJobDone = make(chan bool)
+
+var wg sync.WaitGroup
+
+// StartOPPollers launches maxWorkers OPJobPoll worker goroutines and
+// blocks until they have all exited (each calls wg.Done when it sees
+// OPJobDone).
+// NOTE(review): wg.Add(1) runs after the goroutine is started; a
+// worker that exits immediately could call wg.Done before the matching
+// Add — confirm against upstream.
+func (b *Bucket) StartOPPollers(maxWorkers int) {
+
+	for i := 0; i < maxWorkers; i++ {
+		go b.OPJobPoll()
+		wg.Add(1)
+	}
+	wg.Wait()
+}
+
+// SetObserveAndPersist configures the bucket's durability settings,
+// after validating that the cluster has enough healthy nodes and
+// replicas to satisfy them and that mutation tokens are enabled
+// (required for sequence-number based observe/persist polling).
+func (b *Bucket) SetObserveAndPersist(nPersist PersistTo, nObserve ObserveTo) (err error) {
+
+	numNodes := len(b.Nodes())
+	if int(nPersist) > numNodes || int(nObserve) > numNodes {
+		return fmt.Errorf("Not enough healthy nodes in the cluster")
+	}
+
+	if int(nPersist) > (b.Replicas+1) || int(nObserve) > b.Replicas {
+		return fmt.Errorf("Not enough replicas in the cluster")
+	}
+
+	if EnableMutationToken == false {
+		return fmt.Errorf("Mutation Tokens not enabled ")
+	}
+
+	b.ds = &DurablitySettings{Persist: PersistTo(nPersist), Observe: ObserveTo(nObserve)}
+	return
+}
+
+// ObserveAndPersistPoll enqueues one observe and/or persist job per
+// relevant node of vbucket vb (per the bucket's durability settings)
+// and waits until every job reports that seqNo has been
+// observed/persisted — unfinished jobs are requeued — or a job errors,
+// or a failover is detected (reported via the failover return value).
+// No-op when durability settings are unset.
+func (b *Bucket) ObserveAndPersistPoll(vb uint16, vbuuid uint64, seqNo uint64) (err error, failover bool) {
+	b.RLock()
+	ds := b.ds
+	b.RUnlock()
+
+	if ds == nil {
+		return
+	}
+
+	nj := 0 // total number of jobs
+	resultChan := make(chan *ObservePersistJob, 10)
+	errChan := make(chan *OPErrResponse, 10)
+
+	nodes := b.GetNodeList(vb)
+	if int(ds.Observe) > len(nodes) || int(ds.Persist) > len(nodes) {
+		return fmt.Errorf("Not enough healthy nodes in the cluster"), false
+	}
+
+	logging.Infof("Node list %v", nodes)
+
+	if ds.Observe >= ObserveReplicateOne {
+		// create a job for each host
+		for i := ObserveReplicateOne; i < ds.Observe+1; i++ {
+			opJob := ObservePersistPool.Get()
+			opJob.vb = vb
+			opJob.vbuuid = vbuuid
+			opJob.jobType = OBSERVE
+			opJob.hostname = nodes[i]
+			opJob.resultChan = resultChan
+			opJob.errorChan = errChan
+
+			OPJobChan <- opJob
+			nj++
+
+		}
+	}
+
+	if ds.Persist >= PersistMaster {
+		// one persist job per node that must confirm persistence
+		for i := PersistMaster; i < ds.Persist+1; i++ {
+			opJob := ObservePersistPool.Get()
+			opJob.vb = vb
+			opJob.vbuuid = vbuuid
+			opJob.jobType = PERSIST
+			opJob.hostname = nodes[i]
+			opJob.resultChan = resultChan
+			opJob.errorChan = errChan
+
+			OPJobChan <- opJob
+			nj++
+
+		}
+	}
+
+	// collect results until all nj outstanding jobs have completed
+	ok := true
+	for ok {
+		select {
+		case res := <-resultChan:
+			jobDone := false
+			if res.failover == 0 {
+				// no failover
+				if res.jobType == PERSIST {
+					if res.lastPersistedSeqNo >= seqNo {
+						jobDone = true
+					}
+
+				} else {
+					if res.currentSeqNo >= seqNo {
+						jobDone = true
+					}
+				}
+
+				if jobDone == true {
+					nj--
+					ObservePersistPool.Put(res)
+				} else {
+					// requeue this job
+					OPJobChan <- res
+				}
+
+			} else {
+				// Not currently handling failover scenarios TODO
+				nj--
+				ObservePersistPool.Put(res)
+				failover = true
+			}
+
+			if nj == 0 {
+				// done with all the jobs
+				ok = false
+				close(resultChan)
+				close(errChan)
+			}
+
+		case Err := <-errChan:
+			logging.Errorf("Error in Observe/Persist %v", Err.err)
+			err = fmt.Errorf("Error in Observe/Persist job %v", Err.err)
+			nj--
+			ObservePersistPool.Put(Err.job)
+			if nj == 0 {
+				close(resultChan)
+				close(errChan)
+				ok = false
+			}
+		}
+	}
+
+	return
+}
+
+// OPJobPoll is the worker loop for observe/persist jobs: it takes
+// jobs from OPJobChan, runs ObserveSeq against the job's host over a
+// pooled connection, and publishes the outcome on the job's result or
+// error channel. It runs until signalled via OPJobDone, then marks
+// itself done on the package WaitGroup.
+func (b *Bucket) OPJobPoll() {
+
+	ok := true
+	for ok == true {
+		select {
+		case job := <-OPJobChan:
+			pool := b.getConnPoolByHost(job.hostname, false /* bucket not already locked */)
+			if pool == nil {
+				errRes := &OPErrResponse{vb: job.vb, vbuuid: job.vbuuid}
+				errRes.err = fmt.Errorf("Pool not found for host %v", job.hostname)
+				errRes.job = job
+				job.errorChan <- errRes
+				continue
+			}
+			conn, err := pool.Get()
+			if err != nil {
+				errRes := &OPErrResponse{vb: job.vb, vbuuid: job.vbuuid}
+				errRes.err = fmt.Errorf("Unable to get connection from pool %v", err)
+				errRes.job = job
+				job.errorChan <- errRes
+				continue
+			}
+
+			res, err := conn.ObserveSeq(job.vb, job.vbuuid)
+			if err != nil {
+				// NOTE(review): the connection is not returned to the
+				// pool on this path — confirm whether that leak is
+				// intentional (conn may be unusable after the error)
+				errRes := &OPErrResponse{vb: job.vb, vbuuid: job.vbuuid}
+				errRes.err = fmt.Errorf("Command failed %v", err)
+				errRes.job = job
+				job.errorChan <- errRes
+				continue
+
+			}
+			pool.Return(conn)
+			job.lastPersistedSeqNo = res.LastPersistedSeqNo
+			job.currentSeqNo = res.CurrentSeqNo
+			job.failover = res.Failover
+
+			job.resultChan <- job
+		case <-OPJobDone:
+			logging.Infof("Observe Persist Poller exitting")
+			ok = false
+		}
+	}
+	wg.Done()
+}
+
+// GetNodeList returns, for vbucket vb, the master-node address of each
+// replica slot in the vbucket map. Slots with no assigned node (index
+// < 0) or with an unusable address are left as the empty string. Nil
+// is returned when the vbucket map is smaller than vb.
+func (b *Bucket) GetNodeList(vb uint16) []string {
+
+	vbm := b.VBServerMap()
+	if len(vbm.VBucketMap) < int(vb) {
+		logging.Infof("vbmap smaller than vblist")
+		return nil
+	}
+
+	nodes := make([]string, len(vbm.VBucketMap[vb]))
+	for i := 0; i < len(vbm.VBucketMap[vb]); i++ {
+		n := vbm.VBucketMap[vb][i]
+		if n < 0 {
+			continue
+		}
+
+		node := b.getMasterNode(n)
+		if len(node) > 1 {
+			nodes[i] = node
+		}
+		continue
+
+	}
+	return nodes
+}
+
+// OPpool is a fixed-capacity free list of ObservePersistJob objects,
+// backed by a buffered channel.
+type OPpool struct {
+	pool chan *ObservePersistJob
+}
+
+// NewPool creates a new pool of jobs with capacity max.
+func NewPool(max int) *OPpool {
+	return &OPpool{
+		pool: make(chan *ObservePersistJob, max),
+	}
+}
+
+// Get borrows a job from the pool, allocating a fresh one when the
+// pool is empty. Fields of a reused job are NOT reset here.
+func (p *OPpool) Get() *ObservePersistJob {
+	var o *ObservePersistJob
+	select {
+	case o = <-p.pool:
+	default:
+		o = &ObservePersistJob{}
+	}
+	return o
+}
+
+// Put returns a job to the pool; when the pool is full the job is
+// simply dropped for the garbage collector.
+func (p *OPpool) Put(o *ObservePersistJob) {
+	select {
+	case p.pool <- o:
+	default:
+		// let it go, let it go...
+	}
+}
diff --git a/vendor/github.com/couchbase/go-couchbase/pools.go b/vendor/github.com/couchbase/go-couchbase/pools.go
new file mode 100644
index 0000000..0e23793
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/pools.go
@@ -0,0 +1,1474 @@
+package couchbase
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net/http"
+ "net/url"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+ "unsafe"
+
+ "github.com/couchbase/goutils/logging"
+
+ "github.com/couchbase/gomemcached" // package name is 'gomemcached'
+ "github.com/couchbase/gomemcached/client" // package name is 'memcached'
+)
+
+// HTTPClient to use for REST and view operations.
+var MaxIdleConnsPerHost = 256
+var ClientTimeOut = 10 * time.Second
+var HTTPTransport = &http.Transport{MaxIdleConnsPerHost: MaxIdleConnsPerHost}
+var HTTPClient = &http.Client{Transport: HTTPTransport, Timeout: ClientTimeOut}
+
+// PoolSize is the size of each connection pool (per host).
+var PoolSize = 64
+
+// PoolOverflow is the number of overflow connections allowed in a
+// pool.
+var PoolOverflow = 16
+
+// AsynchronousCloser turns on asynchronous closing for overflow connections
+var AsynchronousCloser = false
+
+// TCP KeepAlive enabled/disabled
+var TCPKeepalive = false
+
+// Enable MutationToken
+var EnableMutationToken = false
+
+// Enable Data Type response
+var EnableDataType = false
+
+// Enable Xattr
+var EnableXattr = false
+
+// Enable Collections
+var EnableCollections = false
+
+// TCP keepalive interval in seconds. Default 30 minutes
+var TCPKeepaliveInterval = 30 * 60
+
+// Used to decide whether to skip verification of certificates when
+// connecting to an ssl port.
+var skipVerify = true
+var certFile = ""
+var keyFile = ""
+var rootFile = ""
+
+func SetSkipVerify(skip bool) {
+ skipVerify = skip
+}
+
+func SetCertFile(cert string) {
+ certFile = cert
+}
+
+func SetKeyFile(cert string) {
+ keyFile = cert
+}
+
+func SetRootFile(cert string) {
+ rootFile = cert
+}
+
+// SetConnectionPoolParams allows applications to specify the PoolSize
+// and PoolOverflow; non-positive values leave the current setting
+// unchanged.
+func SetConnectionPoolParams(size, overflow int) {
+
+	if size > 0 {
+		PoolSize = size
+	}
+
+	if overflow > 0 {
+		PoolOverflow = overflow
+	}
+}
+
+// Turn off overflow connections
+func DisableOverflowConnections() {
+ PoolOverflow = 0
+}
+
+// Toggle asynchronous overflow closer
+func EnableAsynchronousCloser(closer bool) {
+ AsynchronousCloser = closer
+}
+
+// Allow TCP keepalive parameters to be set by the application
+func SetTcpKeepalive(enabled bool, interval int) {
+
+ TCPKeepalive = enabled
+
+ if interval > 0 {
+ TCPKeepaliveInterval = interval
+ }
+}
+
+// AuthHandler is a callback that gets the auth username and password
+// for the given bucket.
+type AuthHandler interface {
+ GetCredentials() (string, string, string)
+}
+
+// AuthHandler is a callback that gets the auth username and password
+// for the given bucket and sasl for memcached.
+type AuthWithSaslHandler interface {
+ AuthHandler
+ GetSaslCredentials() (string, string)
+}
+
+// MultiBucketAuthHandler is kind of AuthHandler that may perform
+// different auth for different buckets.
+type MultiBucketAuthHandler interface {
+ AuthHandler
+ ForBucket(bucket string) AuthHandler
+}
+
+// HTTPAuthHandler is kind of AuthHandler that performs more general
+// for outgoing http requests than is possible via simple
+// GetCredentials() call (i.e. digest auth or different auth per
+// different destinations).
+type HTTPAuthHandler interface {
+ AuthHandler
+ SetCredsForRequest(req *http.Request) error
+}
+
+// RestPool represents a single pool returned from the pools REST API.
+type RestPool struct {
+ Name string `json:"name"`
+ StreamingURI string `json:"streamingUri"`
+ URI string `json:"uri"`
+}
+
+// Pools represents the collection of pools as returned from the REST API.
+type Pools struct {
+ ComponentsVersion map[string]string `json:"componentsVersion,omitempty"`
+ ImplementationVersion string `json:"implementationVersion"`
+ IsAdmin bool `json:"isAdminCreds"`
+ UUID string `json:"uuid"`
+ Pools []RestPool `json:"pools"`
+}
+
+// A Node is a computer in a cluster running the couchbase software.
+type Node struct {
+ ClusterCompatibility int `json:"clusterCompatibility"`
+ ClusterMembership string `json:"clusterMembership"`
+ CouchAPIBase string `json:"couchApiBase"`
+ Hostname string `json:"hostname"`
+ InterestingStats map[string]float64 `json:"interestingStats,omitempty"`
+ MCDMemoryAllocated float64 `json:"mcdMemoryAllocated"`
+ MCDMemoryReserved float64 `json:"mcdMemoryReserved"`
+ MemoryFree float64 `json:"memoryFree"`
+ MemoryTotal float64 `json:"memoryTotal"`
+ OS string `json:"os"`
+ Ports map[string]int `json:"ports"`
+ Services []string `json:"services"`
+ Status string `json:"status"`
+ Uptime int `json:"uptime,string"`
+ Version string `json:"version"`
+ ThisNode bool `json:"thisNode,omitempty"`
+}
+
+// A Pool of nodes and buckets.
+type Pool struct {
+ BucketMap map[string]*Bucket
+ Nodes []Node
+
+ BucketURL map[string]string `json:"buckets"`
+
+ client *Client
+}
+
+// VBucketServerMap is the a mapping of vbuckets to nodes.
+type VBucketServerMap struct {
+ HashAlgorithm string `json:"hashAlgorithm"`
+ NumReplicas int `json:"numReplicas"`
+ ServerList []string `json:"serverList"`
+ VBucketMap [][]int `json:"vBucketMap"`
+}
+
+type DurablitySettings struct {
+ Persist PersistTo
+ Observe ObserveTo
+}
+
+// Bucket is the primary entry point for most data operations.
+// Bucket is a locked data structure. All access to its fields should be done using read or write locking,
+// as appropriate.
+//
+// Some access methods require locking, but rely on the caller to do so. These are appropriate
+// for calls from methods that have already locked the structure. Methods like this
+// take a boolean parameter "bucketLocked".
+type Bucket struct {
+ sync.RWMutex
+ AuthType string `json:"authType"`
+ Capabilities []string `json:"bucketCapabilities"`
+ CapabilitiesVersion string `json:"bucketCapabilitiesVer"`
+ Type string `json:"bucketType"`
+ Name string `json:"name"`
+ NodeLocator string `json:"nodeLocator"`
+ Quota map[string]float64 `json:"quota,omitempty"`
+ Replicas int `json:"replicaNumber"`
+ Password string `json:"saslPassword"`
+ URI string `json:"uri"`
+ StreamingURI string `json:"streamingUri"`
+ LocalRandomKeyURI string `json:"localRandomKeyUri,omitempty"`
+ UUID string `json:"uuid"`
+ ConflictResolutionType string `json:"conflictResolutionType,omitempty"`
+ DDocs struct {
+ URI string `json:"uri"`
+ } `json:"ddocs,omitempty"`
+ BasicStats map[string]interface{} `json:"basicStats,omitempty"`
+ Controllers map[string]interface{} `json:"controllers,omitempty"`
+
+ // These are used for JSON IO, but isn't used for processing
+ // since it needs to be swapped out safely.
+ VBSMJson VBucketServerMap `json:"vBucketServerMap"`
+ NodesJSON []Node `json:"nodes"`
+
+ pool *Pool
+ connPools unsafe.Pointer // *[]*connectionPool
+ vBucketServerMap unsafe.Pointer // *VBucketServerMap
+ nodeList unsafe.Pointer // *[]Node
+ commonSufix string
+ ah AuthHandler // auth handler
+ ds *DurablitySettings // Durablity Settings for this bucket
+ closed bool
+}
+
+// PoolServices is all the bucket-independent services in a pool
+type PoolServices struct {
+ Rev int `json:"rev"`
+ NodesExt []NodeServices `json:"nodesExt"`
+ Capabilities json.RawMessage `json:"clusterCapabilities"`
+}
+
+// NodeServices is all the bucket-independent services running on
+// a node (given by Hostname)
+type NodeServices struct {
+ Services map[string]int `json:"services,omitempty"`
+ Hostname string `json:"hostname"`
+ ThisNode bool `json:"thisNode"`
+}
+
+type BucketNotFoundError struct {
+ bucket string
+}
+
+func (e *BucketNotFoundError) Error() string {
+ return fmt.Sprint("No bucket named " + e.bucket)
+}
+
+type BucketAuth struct {
+ name string
+ saslPwd string
+ bucket string
+}
+
+func newBucketAuth(name string, pass string, bucket string) *BucketAuth {
+ return &BucketAuth{name: name, saslPwd: pass, bucket: bucket}
+}
+
+func (ba *BucketAuth) GetCredentials() (string, string, string) {
+ return ba.name, ba.saslPwd, ba.bucket
+}
+
+// VBServerMap returns the current VBucketServerMap.
+func (b *Bucket) VBServerMap() *VBucketServerMap {
+ b.RLock()
+ defer b.RUnlock()
+ ret := (*VBucketServerMap)(b.vBucketServerMap)
+ return ret
+}
+
+// GetVBmap returns, for each address in addrs (defaulting to the full
+// server list), the vbucket numbers whose master resides at that
+// address. Errors on an empty or out-of-range vbucket map entry;
+// vbuckets mastered by addresses not in addrs are silently skipped.
+func (b *Bucket) GetVBmap(addrs []string) (map[string][]uint16, error) {
+	vbmap := b.VBServerMap()
+	servers := vbmap.ServerList
+	if addrs == nil {
+		addrs = vbmap.ServerList
+	}
+
+	m := make(map[string][]uint16)
+	for _, addr := range addrs {
+		m[addr] = make([]uint16, 0)
+	}
+	for vbno, idxs := range vbmap.VBucketMap {
+		if len(idxs) == 0 {
+			return nil, fmt.Errorf("vbmap: No KV node no for vb %d", vbno)
+		} else if idxs[0] < 0 || idxs[0] >= len(servers) {
+			return nil, fmt.Errorf("vbmap: Invalid KV node no %d for vb %d", idxs[0], vbno)
+		}
+		addr := servers[idxs[0]]
+		if _, ok := m[addr]; ok {
+			m[addr] = append(m[addr], uint16(vbno))
+		}
+	}
+	return m, nil
+}
+
+// true if node is not on the bucket VBmap
+func (b *Bucket) checkVBmap(node string) bool {
+ vbmap := b.VBServerMap()
+ servers := vbmap.ServerList
+
+ for _, idxs := range vbmap.VBucketMap {
+ if len(idxs) == 0 {
+ return true
+ } else if idxs[0] < 0 || idxs[0] >= len(servers) {
+ return true
+ }
+ if servers[idxs[0]] == node {
+ return false
+ }
+ }
+ return true
+}
+
+func (b *Bucket) GetName() string {
+ b.RLock()
+ defer b.RUnlock()
+ ret := b.Name
+ return ret
+}
+
+// Nodes returns the current list of nodes servicing this bucket.
+func (b *Bucket) Nodes() []Node {
+ b.RLock()
+ defer b.RUnlock()
+ ret := *(*[]Node)(b.nodeList)
+ return ret
+}
+
+// return the list of healthy nodes
+func (b *Bucket) HealthyNodes() []Node {
+ nodes := []Node{}
+
+ for _, n := range b.Nodes() {
+ if n.Status == "healthy" && n.CouchAPIBase != "" {
+ nodes = append(nodes, n)
+ }
+ if n.Status != "healthy" { // log non-healthy node
+ logging.Infof("Non-healthy node; node details:")
+ logging.Infof("Hostname=%v, Status=%v, CouchAPIBase=%v, ThisNode=%v", n.Hostname, n.Status, n.CouchAPIBase, n.ThisNode)
+ }
+ }
+
+ return nodes
+}
+
+func (b *Bucket) getConnPools(bucketLocked bool) []*connectionPool {
+ if !bucketLocked {
+ b.RLock()
+ defer b.RUnlock()
+ }
+ if b.connPools != nil {
+ return *(*[]*connectionPool)(b.connPools)
+ } else {
+ return nil
+ }
+}
+
+func (b *Bucket) replaceConnPools(with []*connectionPool) {
+ b.Lock()
+ defer b.Unlock()
+
+ old := b.connPools
+ b.connPools = unsafe.Pointer(&with)
+ if old != nil {
+ for _, pool := range *(*[]*connectionPool)(old) {
+ if pool != nil {
+ pool.Close()
+ }
+ }
+ }
+ return
+}
+
+func (b *Bucket) getConnPool(i int) *connectionPool {
+
+ if i < 0 {
+ return nil
+ }
+
+ p := b.getConnPools(false /* not already locked */)
+ if len(p) > i {
+ return p[i]
+ }
+
+ return nil
+}
+
+func (b *Bucket) getConnPoolByHost(host string, bucketLocked bool) *connectionPool {
+ pools := b.getConnPools(bucketLocked)
+ for _, p := range pools {
+ if p != nil && p.host == host {
+ return p
+ }
+ }
+
+ return nil
+}
+
+// Given a vbucket number, returns a memcached connection to it.
+// The connection must be returned to its pool after use. Retries
+// whenever the selected pool was closed by a concurrent vbucket-map
+// refresh.
+// NOTE(review): the bound check `len(vbm.VBucketMap) < int(vb)` looks
+// off by one (vb == len would index out of range) — confirm against
+// upstream before relying on it.
+func (b *Bucket) getConnectionToVBucket(vb uint32) (*memcached.Client, *connectionPool, error) {
+	for {
+		vbm := b.VBServerMap()
+		if len(vbm.VBucketMap) < int(vb) {
+			return nil, nil, fmt.Errorf("go-couchbase: vbmap smaller than vbucket list: %v vs. %v",
+				vb, vbm.VBucketMap)
+		}
+		masterId := vbm.VBucketMap[vb][0]
+		if masterId < 0 {
+			return nil, nil, fmt.Errorf("go-couchbase: No master for vbucket %d", vb)
+		}
+		pool := b.getConnPool(masterId)
+		conn, err := pool.Get()
+		if err != errClosedPool {
+			return conn, pool, err
+		}
+		// If conn pool was closed, because another goroutine refreshed the vbucket map, retry...
+	}
+}
+
+// To get random documents, we need to cover all the nodes, so select
+// a connection at random.
+
+func (b *Bucket) getRandomConnection() (*memcached.Client, *connectionPool, error) {
+ for {
+ var currentPool = 0
+ pools := b.getConnPools(false /* not already locked */)
+ if len(pools) == 0 {
+ return nil, nil, fmt.Errorf("No connection pool found")
+ } else if len(pools) > 1 { // choose a random connection
+ currentPool = rand.Intn(len(pools))
+ } // if only one pool, currentPool defaults to 0, i.e., the only pool
+
+ // get the pool
+ pool := pools[currentPool]
+ conn, err := pool.Get()
+ if err != errClosedPool {
+ return conn, pool, err
+ }
+
+ // If conn pool was closed, because another goroutine refreshed the vbucket map, retry...
+ }
+}
+
+//
+// Get a random document from a bucket. Since the bucket may be distributed
+// across nodes, we must first select a random connection, and then use the
+// Client.GetRandomDoc() call to get a random document from that node.
+//
+
+func (b *Bucket) GetRandomDoc() (*gomemcached.MCResponse, error) {
+	// get a connection from the pool
+	conn, pool, err := b.getRandomConnection()
+
+	if err != nil {
+		return nil, err
+	}
+
+	// We may need to select the bucket before GetRandomDoc()
+	// will work. This is sometimes done at startup (see defaultMkConn())
+	// but not always, depending on the auth type.
+	_, err = conn.SelectBucket(b.Name)
+	if err != nil {
+		// NOTE(review): the connection is not returned to the pool on
+		// this path — confirm whether the leak is intentional
+		return nil, err
+	}
+
+	// get a random document from the connection
+	doc, err := conn.GetRandomDoc()
+	// need to return the connection to the pool
+	pool.Return(conn)
+	return doc, err
+}
+
+func (b *Bucket) getMasterNode(i int) string {
+ p := b.getConnPools(false /* not already locked */)
+ if len(p) > i {
+ return p[i].host
+ }
+ return ""
+}
+
+func (b *Bucket) authHandler(bucketLocked bool) (ah AuthHandler) {
+ if !bucketLocked {
+ b.RLock()
+ defer b.RUnlock()
+ }
+ pool := b.pool
+ name := b.Name
+
+ if pool != nil {
+ ah = pool.client.ah
+ }
+ if mbah, ok := ah.(MultiBucketAuthHandler); ok {
+ return mbah.ForBucket(name)
+ }
+ if ah == nil {
+ ah = &basicAuth{name, ""}
+ }
+ return
+}
+
+// NodeAddresses gets the (sorted) list of memcached node addresses
+// (hostname:port).
+func (b *Bucket) NodeAddresses() []string {
+ vsm := b.VBServerMap()
+ rv := make([]string, len(vsm.ServerList))
+ copy(rv, vsm.ServerList)
+ sort.Strings(rv)
+ return rv
+}
+
+// CommonAddressSuffix finds the longest common suffix of all
+// host:port strings in the node list.
+func (b *Bucket) CommonAddressSuffix() string {
+ input := []string{}
+ for _, n := range b.Nodes() {
+ input = append(input, n.Hostname)
+ }
+ return FindCommonSuffix(input)
+}
+
+// A Client is the starting point for all services across all buckets
+// in a Couchbase cluster.
+type Client struct {
+ BaseURL *url.URL
+ ah AuthHandler
+ Info Pools
+ tlsConfig *tls.Config
+}
+
+func maybeAddAuth(req *http.Request, ah AuthHandler) error {
+ if hah, ok := ah.(HTTPAuthHandler); ok {
+ return hah.SetCredsForRequest(req)
+ }
+ if ah != nil {
+ user, pass, _ := ah.GetCredentials()
+ req.Header.Set("Authorization", "Basic "+
+ base64.StdEncoding.EncodeToString([]byte(user+":"+pass)))
+ }
+ return nil
+}
+
+// arbitrary number, may need to be tuned #FIXME
+const HTTP_MAX_RETRY = 5
+
+// Someday golang network packages will implement standard
+// error codes. Until then #sigh
+// isHttpConnError reports whether err looks like a transient
+// connection-level failure worth retrying, based on its message text.
+func isHttpConnError(err error) bool {
+
+	estr := err.Error()
+	return strings.Contains(estr, "broken pipe") ||
+		strings.Contains(estr, "broken connection") ||
+		strings.Contains(estr, "connection reset")
+}
+
+var client *http.Client
+
+func ClientConfigForX509(certFile, keyFile, rootFile string) (*tls.Config, error) {
+ cfg := &tls.Config{}
+
+ if certFile != "" && keyFile != "" {
+ tlsCert, err := tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ return nil, err
+ }
+ cfg.Certificates = []tls.Certificate{tlsCert}
+ } else {
+ //error need to pass both certfile and keyfile
+ return nil, fmt.Errorf("N1QL: Need to pass both certfile and keyfile")
+ }
+
+ var caCert []byte
+ var err1 error
+
+ caCertPool := x509.NewCertPool()
+ if rootFile != "" {
+ // Read that value in
+ caCert, err1 = ioutil.ReadFile(rootFile)
+ if err1 != nil {
+ return nil, fmt.Errorf(" Error in reading cacert file, err: %v", err1)
+ }
+ caCertPool.AppendCertsFromPEM(caCert)
+ }
+
+ cfg.RootCAs = caCertPool
+ return cfg, nil
+}
+
+// doHTTPRequest issues req via a package-level client that is created
+// lazily on first use — with a TLS config honoring skipVerify /
+// certFile / keyFile / rootFile for https, or the shared HTTPClient
+// otherwise — retrying up to HTTP_MAX_RETRY times on connection-level
+// errors (see isHttpConnError).
+// NOTE(review): the lazy init of the package-level `client` is not
+// synchronized; concurrent first calls could race — confirm upstream.
+func doHTTPRequest(req *http.Request) (*http.Response, error) {
+
+	var err error
+	var res *http.Response
+
+	// we need a client that ignores certificate errors, since we self-sign
+	// our certs
+	if client == nil && req.URL.Scheme == "https" {
+		var tr *http.Transport
+
+		if skipVerify {
+			tr = &http.Transport{
+				TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+			}
+		} else {
+			// Handle cases with cert
+
+			cfg, err := ClientConfigForX509(certFile, keyFile, rootFile)
+			if err != nil {
+				return nil, err
+			}
+
+			tr = &http.Transport{
+				TLSClientConfig: cfg,
+			}
+		}
+
+		client = &http.Client{Transport: tr}
+
+	} else if client == nil {
+		client = HTTPClient
+	}
+
+	for i := 0; i < HTTP_MAX_RETRY; i++ {
+		res, err = client.Do(req)
+		if err != nil && isHttpConnError(err) {
+			continue
+		}
+		break
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return res, err
+}
+
+func doPutAPI(baseURL *url.URL, path string, params map[string]interface{}, authHandler AuthHandler, out interface{}) error {
+ return doOutputAPI("PUT", baseURL, path, params, authHandler, out)
+}
+
+func doPostAPI(baseURL *url.URL, path string, params map[string]interface{}, authHandler AuthHandler, out interface{}) error {
+ return doOutputAPI("POST", baseURL, path, params, authHandler, out)
+}
+
+func doOutputAPI(
+ httpVerb string,
+ baseURL *url.URL,
+ path string,
+ params map[string]interface{},
+ authHandler AuthHandler,
+ out interface{}) error {
+
+ var requestUrl string
+
+ if q := strings.Index(path, "?"); q > 0 {
+ requestUrl = baseURL.Scheme + "://" + baseURL.Host + path[:q] + "?" + path[q+1:]
+ } else {
+ requestUrl = baseURL.Scheme + "://" + baseURL.Host + path
+ }
+
+ postData := url.Values{}
+ for k, v := range params {
+ postData.Set(k, fmt.Sprintf("%v", v))
+ }
+
+ req, err := http.NewRequest(httpVerb, requestUrl, bytes.NewBufferString(postData.Encode()))
+ if err != nil {
+ return err
+ }
+
+ req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
+
+ err = maybeAddAuth(req, authHandler)
+ if err != nil {
+ return err
+ }
+
+ res, err := doHTTPRequest(req)
+ if err != nil {
+ return err
+ }
+
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ bod, _ := ioutil.ReadAll(io.LimitReader(res.Body, 512))
+ return fmt.Errorf("HTTP error %v getting %q: %s",
+ res.Status, requestUrl, bod)
+ }
+
+ d := json.NewDecoder(res.Body)
+ if err = d.Decode(&out); err != nil {
+ return err
+ }
+ return nil
+}
+
+func queryRestAPI(
+ baseURL *url.URL,
+ path string,
+ authHandler AuthHandler,
+ out interface{}) error {
+
+ var requestUrl string
+
+ if q := strings.Index(path, "?"); q > 0 {
+ requestUrl = baseURL.Scheme + "://" + baseURL.Host + path[:q] + "?" + path[q+1:]
+ } else {
+ requestUrl = baseURL.Scheme + "://" + baseURL.Host + path
+ }
+
+ req, err := http.NewRequest("GET", requestUrl, nil)
+ if err != nil {
+ return err
+ }
+
+ err = maybeAddAuth(req, authHandler)
+ if err != nil {
+ return err
+ }
+
+ res, err := doHTTPRequest(req)
+ if err != nil {
+ return err
+ }
+
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ bod, _ := ioutil.ReadAll(io.LimitReader(res.Body, 512))
+ return fmt.Errorf("HTTP error %v getting %q: %s",
+ res.Status, requestUrl, bod)
+ }
+
+ d := json.NewDecoder(res.Body)
+ if err = d.Decode(&out); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (c *Client) ProcessStream(path string, callb func(interface{}) error, data interface{}) error {
+ return c.processStream(c.BaseURL, path, c.ah, callb, data)
+}
+
+// Based on code in http://src.couchbase.org/source/xref/trunk/goproj/src/github.com/couchbase/indexing/secondary/dcp/pools.go#309
+func (c *Client) processStream(baseURL *url.URL, path string, authHandler AuthHandler, callb func(interface{}) error, data interface{}) error {
+ var requestUrl string
+
+ if q := strings.Index(path, "?"); q > 0 {
+ requestUrl = baseURL.Scheme + "://" + baseURL.Host + path[:q] + "?" + path[q+1:]
+ } else {
+ requestUrl = baseURL.Scheme + "://" + baseURL.Host + path
+ }
+
+ req, err := http.NewRequest("GET", requestUrl, nil)
+ if err != nil {
+ return err
+ }
+
+ err = maybeAddAuth(req, authHandler)
+ if err != nil {
+ return err
+ }
+
+ res, err := doHTTPRequest(req)
+ if err != nil {
+ return err
+ }
+
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ bod, _ := ioutil.ReadAll(io.LimitReader(res.Body, 512))
+ return fmt.Errorf("HTTP error %v getting %q: %s",
+ res.Status, requestUrl, bod)
+ }
+
+ reader := bufio.NewReader(res.Body)
+ for {
+ bs, err := reader.ReadBytes('\n')
+ if err != nil {
+ return err
+ }
+ if len(bs) == 1 && bs[0] == '\n' {
+ continue
+ }
+
+ err = json.Unmarshal(bs, data)
+ if err != nil {
+ return err
+ }
+ err = callb(data)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+
+}
+
+func (c *Client) parseURLResponse(path string, out interface{}) error {
+ return queryRestAPI(c.BaseURL, path, c.ah, out)
+}
+
+func (c *Client) parsePostURLResponse(path string, params map[string]interface{}, out interface{}) error {
+ return doPostAPI(c.BaseURL, path, params, c.ah, out)
+}
+
+func (c *Client) parsePutURLResponse(path string, params map[string]interface{}, out interface{}) error {
+ return doPutAPI(c.BaseURL, path, params, c.ah, out)
+}
+
+func (b *Bucket) parseURLResponse(path string, out interface{}) error {
+ nodes := b.Nodes()
+ if len(nodes) == 0 {
+ return errors.New("no couch rest URLs")
+ }
+
+ // Pick a random node to start querying.
+ startNode := rand.Intn(len(nodes))
+ maxRetries := len(nodes)
+ for i := 0; i < maxRetries; i++ {
+ node := nodes[(startNode+i)%len(nodes)] // Wrap around the nodes list.
+ // Skip non-healthy nodes.
+ if node.Status != "healthy" || node.CouchAPIBase == "" {
+ continue
+ }
+ url := &url.URL{
+ Host: node.Hostname,
+ Scheme: "http",
+ }
+
+ // Lock here to avoid having pool closed under us.
+ b.RLock()
+ err := queryRestAPI(url, path, b.pool.client.ah, out)
+ b.RUnlock()
+ if err == nil {
+ return err
+ }
+ }
+ return errors.New("All nodes failed to respond or no healthy nodes for bucket found")
+}
+
+func (b *Bucket) parseAPIResponse(path string, out interface{}) error {
+ nodes := b.Nodes()
+ if len(nodes) == 0 {
+ return errors.New("no couch rest URLs")
+ }
+
+ var err error
+ var u *url.URL
+
+ // Pick a random node to start querying.
+ startNode := rand.Intn(len(nodes))
+ maxRetries := len(nodes)
+ for i := 0; i < maxRetries; i++ {
+ node := nodes[(startNode+i)%len(nodes)] // Wrap around the nodes list.
+ // Skip non-healthy nodes.
+ if node.Status != "healthy" || node.CouchAPIBase == "" {
+ continue
+ }
+
+ u, err = ParseURL(node.CouchAPIBase)
+ // Lock here so pool does not get closed under us.
+ b.RLock()
+ if err != nil {
+ b.RUnlock()
+ return fmt.Errorf("config error: Bucket %q node #%d CouchAPIBase=%q: %v",
+ b.Name, i, node.CouchAPIBase, err)
+ } else if b.pool != nil {
+ u.User = b.pool.client.BaseURL.User
+ }
+ u.Path = path
+
+ // generate the path so that the strings are properly escaped
+ // MB-13770
+ requestPath := strings.Split(u.String(), u.Host)[1]
+
+ err = queryRestAPI(u, requestPath, b.pool.client.ah, out)
+ b.RUnlock()
+ if err == nil {
+ return err
+ }
+ }
+
+ var errStr string
+ if err != nil {
+ errStr = "Error " + err.Error()
+ }
+
+ return errors.New("All nodes failed to respond or returned error or no healthy nodes for bucket found." + errStr)
+}
+
+type basicAuth struct {
+ u, p string
+}
+
+func (b basicAuth) GetCredentials() (string, string, string) {
+ return b.u, b.p, b.u
+}
+
+func basicAuthFromURL(us string) (ah AuthHandler) {
+ u, err := ParseURL(us)
+ if err != nil {
+ return
+ }
+ if user := u.User; user != nil {
+ pw, _ := user.Password()
+ ah = basicAuth{user.Username(), pw}
+ }
+ return
+}
+
+// ConnectWithAuth connects to a couchbase cluster with the given
+// authentication handler.
+func ConnectWithAuth(baseU string, ah AuthHandler) (c Client, err error) {
+ c.BaseURL, err = ParseURL(baseU)
+ if err != nil {
+ return
+ }
+ c.ah = ah
+
+ return c, c.parseURLResponse("/pools", &c.Info)
+}
+
+// Call this method with a TLS certificate file name to make communication
+// with the KV engine encrypted.
+//
+// This method should be called immediately after a Connect*() method.
+func (c *Client) InitTLS(certFile string) error {
+ serverCert, err := ioutil.ReadFile(certFile)
+ if err != nil {
+ return err
+ }
+ CA_Pool := x509.NewCertPool()
+ CA_Pool.AppendCertsFromPEM(serverCert)
+ c.tlsConfig = &tls.Config{RootCAs: CA_Pool}
+ return nil
+}
+
+func (c *Client) ClearTLS() {
+ c.tlsConfig = nil
+}
+
+// ConnectWithAuthCreds connects to a couchbase cluster with the give
+// authorization creds returned by cb_auth
+func ConnectWithAuthCreds(baseU, username, password string) (c Client, err error) {
+ c.BaseURL, err = ParseURL(baseU)
+ if err != nil {
+ return
+ }
+
+ c.ah = newBucketAuth(username, password, "")
+ return c, c.parseURLResponse("/pools", &c.Info)
+}
+
+// Connect to a couchbase cluster. An authentication handler will be
+// created from the userinfo in the URL if provided.
+func Connect(baseU string) (Client, error) {
+ return ConnectWithAuth(baseU, basicAuthFromURL(baseU))
+}
+
+type BucketInfo struct {
+ Name string // name of bucket
+ Password string // SASL password of bucket
+}
+
+//Get SASL buckets
+func GetBucketList(baseU string) (bInfo []BucketInfo, err error) {
+
+ c := &Client{}
+ c.BaseURL, err = ParseURL(baseU)
+ if err != nil {
+ return
+ }
+ c.ah = basicAuthFromURL(baseU)
+
+ var buckets []Bucket
+ err = c.parseURLResponse("/pools/default/buckets", &buckets)
+ if err != nil {
+ return
+ }
+ bInfo = make([]BucketInfo, 0)
+ for _, bucket := range buckets {
+ bucketInfo := BucketInfo{Name: bucket.Name, Password: bucket.Password}
+ bInfo = append(bInfo, bucketInfo)
+ }
+ return bInfo, err
+}
+
+//Set viewUpdateDaemonOptions
+func SetViewUpdateParams(baseU string, params map[string]interface{}) (viewOpts map[string]interface{}, err error) {
+
+ c := &Client{}
+ c.BaseURL, err = ParseURL(baseU)
+ if err != nil {
+ return
+ }
+ c.ah = basicAuthFromURL(baseU)
+
+ if len(params) < 1 {
+ return nil, fmt.Errorf("No params to set")
+ }
+
+ err = c.parsePostURLResponse("/settings/viewUpdateDaemon", params, &viewOpts)
+ if err != nil {
+ return
+ }
+ return viewOpts, err
+}
+
+// This API lets the caller know, if the list of nodes a bucket is
+// connected to has gone through an edit (a rebalance operation)
+// since the last update to the bucket, in which case a Refresh is
+// advised.
+func (b *Bucket) NodeListChanged() bool {
+ b.RLock()
+ pool := b.pool
+ uri := b.URI
+ b.RUnlock()
+
+ tmpb := &Bucket{}
+ err := pool.client.parseURLResponse(uri, tmpb)
+ if err != nil {
+ return true
+ }
+
+ bNodes := *(*[]Node)(b.nodeList)
+ if len(bNodes) != len(tmpb.NodesJSON) {
+ return true
+ }
+
+ bucketHostnames := map[string]bool{}
+ for _, node := range bNodes {
+ bucketHostnames[node.Hostname] = true
+ }
+
+ for _, node := range tmpb.NodesJSON {
+ if _, found := bucketHostnames[node.Hostname]; !found {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Sample data for scopes and collections as returned from the
+// /pooles/default/$BUCKET_NAME/collections API.
+// {"myScope2":{"myCollectionC":{}},"myScope1":{"myCollectionB":{},"myCollectionA":{}},"_default":{"_default":{}}}
+
+// Structures for parsing collections manifest.
+// The map key is the name of the scope.
+// Example data:
+// {"uid":"b","scopes":[
+// {"name":"_default","uid":"0","collections":[
+// {"name":"_default","uid":"0"}]},
+// {"name":"myScope1","uid":"8","collections":[
+// {"name":"myCollectionB","uid":"c"},
+// {"name":"myCollectionA","uid":"b"}]},
+// {"name":"myScope2","uid":"9","collections":[
+// {"name":"myCollectionC","uid":"d"}]}]}
+type InputManifest struct {
+ Uid string
+ Scopes []InputScope
+}
+type InputScope struct {
+ Name string
+ Uid string
+ Collections []InputCollection
+}
+type InputCollection struct {
+ Name string
+ Uid string
+}
+
+// Structures for storing collections information.
+type Manifest struct {
+ Uid uint64
+ Scopes map[string]*Scope // map by name
+}
+type Scope struct {
+ Name string
+ Uid uint64
+ Collections map[string]*Collection // map by name
+}
+type Collection struct {
+ Name string
+ Uid uint64
+}
+
+var _EMPTY_MANIFEST *Manifest = &Manifest{Uid: 0, Scopes: map[string]*Scope{}}
+
+func parseCollectionsManifest(res *gomemcached.MCResponse) (*Manifest, error) {
+ if !EnableCollections {
+ return _EMPTY_MANIFEST, nil
+ }
+
+ var im InputManifest
+ err := json.Unmarshal(res.Body, &im)
+ if err != nil {
+ return nil, err
+ }
+
+ uid, err := strconv.ParseUint(im.Uid, 16, 64)
+ if err != nil {
+ return nil, err
+ }
+ mani := &Manifest{Uid: uid, Scopes: make(map[string]*Scope, len(im.Scopes))}
+ for _, iscope := range im.Scopes {
+ scope_uid, err := strconv.ParseUint(iscope.Uid, 16, 64)
+ if err != nil {
+ return nil, err
+ }
+ scope := &Scope{Uid: scope_uid, Name: iscope.Name, Collections: make(map[string]*Collection, len(iscope.Collections))}
+ mani.Scopes[iscope.Name] = scope
+ for _, icoll := range iscope.Collections {
+ coll_uid, err := strconv.ParseUint(icoll.Uid, 16, 64)
+ if err != nil {
+ return nil, err
+ }
+ coll := &Collection{Uid: coll_uid, Name: icoll.Name}
+ scope.Collections[icoll.Name] = coll
+ }
+ }
+
+ return mani, nil
+}
+
+// This function assumes the bucket is locked.
+func (b *Bucket) GetCollectionsManifest() (*Manifest, error) {
+ // Collections not used?
+ if !EnableCollections {
+ return nil, fmt.Errorf("Collections not enabled.")
+ }
+
+ b.RLock()
+ pools := b.getConnPools(true /* already locked */)
+ pool := pools[0] // Any pool will do, so use the first one.
+ b.RUnlock()
+ client, err := pool.Get()
+ if err != nil {
+ return nil, fmt.Errorf("Unable to get connection to retrieve collections manifest: %v. No collections access to bucket %s.", err, b.Name)
+ }
+
+ // We need to select the bucket before GetCollectionsManifest()
+ // will work. This is sometimes done at startup (see defaultMkConn())
+ // but not always, depending on the auth type.
+ // Doing this is safe because we collect the the connections
+ // by bucket, so the bucket being selected will never change.
+ _, err = client.SelectBucket(b.Name)
+ if err != nil {
+ pool.Return(client)
+ return nil, fmt.Errorf("Unable to select bucket %s: %v. No collections access to bucket %s.", err, b.Name, b.Name)
+ }
+
+ res, err := client.GetCollectionsManifest()
+ if err != nil {
+ pool.Return(client)
+ return nil, fmt.Errorf("Unable to retrieve collections manifest: %v. No collections access to bucket %s.", err, b.Name)
+ }
+ mani, err := parseCollectionsManifest(res)
+ if err != nil {
+ pool.Return(client)
+ return nil, fmt.Errorf("Unable to parse collections manifest: %v. No collections access to bucket %s.", err, b.Name)
+ }
+
+ pool.Return(client)
+ return mani, nil
+}
+
+func (b *Bucket) RefreshFully() error {
+ return b.refresh(false)
+}
+
+func (b *Bucket) Refresh() error {
+ return b.refresh(true)
+}
+
+func (b *Bucket) refresh(preserveConnections bool) error {
+ b.RLock()
+ pool := b.pool
+ uri := b.URI
+ client := pool.client
+ b.RUnlock()
+ tlsConfig := client.tlsConfig
+
+ var poolServices PoolServices
+ var err error
+ if tlsConfig != nil {
+ poolServices, err = client.GetPoolServices("default")
+ if err != nil {
+ return err
+ }
+ }
+
+ tmpb := &Bucket{}
+ err = pool.client.parseURLResponse(uri, tmpb)
+ if err != nil {
+ return err
+ }
+
+ pools := b.getConnPools(false /* bucket not already locked */)
+
+ // We need this lock to ensure that bucket refreshes happening because
+ // of NMVb errors received during bulkGet do not end up over-writing
+ // pool.inUse.
+ b.Lock()
+
+ for _, pool := range pools {
+ if pool != nil {
+ pool.inUse = false
+ }
+ }
+
+ newcps := make([]*connectionPool, len(tmpb.VBSMJson.ServerList))
+ for i := range newcps {
+
+ if preserveConnections {
+ pool := b.getConnPoolByHost(tmpb.VBSMJson.ServerList[i], true /* bucket already locked */)
+ if pool != nil && pool.inUse == false {
+ // if the hostname and index is unchanged then reuse this pool
+ newcps[i] = pool
+ pool.inUse = true
+ continue
+ }
+ }
+
+ hostport := tmpb.VBSMJson.ServerList[i]
+ if tlsConfig != nil {
+ hostport, err = MapKVtoSSL(hostport, &poolServices)
+ if err != nil {
+ b.Unlock()
+ return err
+ }
+ }
+
+ if b.ah != nil {
+ newcps[i] = newConnectionPool(hostport,
+ b.ah, AsynchronousCloser, PoolSize, PoolOverflow, tlsConfig, b.Name)
+
+ } else {
+ newcps[i] = newConnectionPool(hostport,
+ b.authHandler(true /* bucket already locked */),
+ AsynchronousCloser, PoolSize, PoolOverflow, tlsConfig, b.Name)
+ }
+ }
+ b.replaceConnPools2(newcps, true /* bucket already locked */)
+ tmpb.ah = b.ah
+ b.vBucketServerMap = unsafe.Pointer(&tmpb.VBSMJson)
+ b.nodeList = unsafe.Pointer(&tmpb.NodesJSON)
+
+ b.Unlock()
+ return nil
+}
+
+func (p *Pool) refresh() (err error) {
+ p.BucketMap = make(map[string]*Bucket)
+
+ buckets := []Bucket{}
+ err = p.client.parseURLResponse(p.BucketURL["uri"], &buckets)
+ if err != nil {
+ return err
+ }
+ for i, _ := range buckets {
+ b := new(Bucket)
+ *b = buckets[i]
+ b.pool = p
+ b.nodeList = unsafe.Pointer(&b.NodesJSON)
+
+ // MB-33185 this is merely defensive, just in case
+ // refresh() gets called on a perfectly node pool
+ ob, ok := p.BucketMap[b.Name]
+ if ok && ob.connPools != nil {
+ ob.Close()
+ }
+ b.replaceConnPools(make([]*connectionPool, len(b.VBSMJson.ServerList)))
+ p.BucketMap[b.Name] = b
+ runtime.SetFinalizer(b, bucketFinalizer)
+ }
+ return nil
+}
+
+// GetPool gets a pool from within the couchbase cluster (usually
+// "default").
+func (c *Client) GetPool(name string) (p Pool, err error) {
+ var poolURI string
+
+ for _, p := range c.Info.Pools {
+ if p.Name == name {
+ poolURI = p.URI
+ break
+ }
+ }
+ if poolURI == "" {
+ return p, errors.New("No pool named " + name)
+ }
+
+ err = c.parseURLResponse(poolURI, &p)
+
+ p.client = c
+
+ err = p.refresh()
+ return
+}
+
+// GetPoolServices returns all the bucket-independent services in a pool.
+// (See "Exposing services outside of bucket context" in http://goo.gl/uuXRkV)
+func (c *Client) GetPoolServices(name string) (ps PoolServices, err error) {
+ var poolName string
+ for _, p := range c.Info.Pools {
+ if p.Name == name {
+ poolName = p.Name
+ }
+ }
+ if poolName == "" {
+ return ps, errors.New("No pool named " + name)
+ }
+
+ poolURI := "/pools/" + poolName + "/nodeServices"
+ err = c.parseURLResponse(poolURI, &ps)
+
+ return
+}
+
+func (b *Bucket) GetPoolServices(name string) (*PoolServices, error) {
+ b.RLock()
+ pool := b.pool
+ b.RUnlock()
+
+ ps, err := pool.client.GetPoolServices(name)
+ if err != nil {
+ return nil, err
+ }
+
+ return &ps, nil
+}
+
+// Close marks this bucket as no longer needed, closing connections it
+// may have open.
+func (b *Bucket) Close() {
+ b.Lock()
+ defer b.Unlock()
+ if b.connPools != nil {
+ for _, c := range b.getConnPools(true /* already locked */) {
+ if c != nil {
+ c.Close()
+ }
+ }
+ b.connPools = nil
+ }
+}
+
+func bucketFinalizer(b *Bucket) {
+ if b.connPools != nil {
+ if !b.closed {
+ logging.Warnf("Finalizing a bucket with active connections.")
+ }
+
+ // MB-33185 do not leak connection pools
+ b.Close()
+ }
+}
+
+// GetBucket gets a bucket from within this pool.
+func (p *Pool) GetBucket(name string) (*Bucket, error) {
+ rv, ok := p.BucketMap[name]
+ if !ok {
+ return nil, &BucketNotFoundError{bucket: name}
+ }
+ err := rv.Refresh()
+ if err != nil {
+ return nil, err
+ }
+ return rv, nil
+}
+
+// GetBucket gets a bucket from within this pool.
+func (p *Pool) GetBucketWithAuth(bucket, username, password string) (*Bucket, error) {
+ rv, ok := p.BucketMap[bucket]
+ if !ok {
+ return nil, &BucketNotFoundError{bucket: bucket}
+ }
+ rv.ah = newBucketAuth(username, password, bucket)
+ err := rv.Refresh()
+ if err != nil {
+ return nil, err
+ }
+ return rv, nil
+}
+
+// GetPool gets the pool to which this bucket belongs.
+func (b *Bucket) GetPool() *Pool {
+ b.RLock()
+ defer b.RUnlock()
+ ret := b.pool
+ return ret
+}
+
+// GetClient gets the client from which we got this pool.
+func (p *Pool) GetClient() *Client {
+ return p.client
+}
+
+// Release bucket connections when the pool is no longer in use
+func (p *Pool) Close() {
+ // fine to loop through the buckets unlocked
+ // locking happens at the bucket level
+ for b, _ := range p.BucketMap {
+
+ // MB-33208 defer closing connection pools until the bucket is no longer used
+ bucket := p.BucketMap[b]
+ bucket.Lock()
+ bucket.closed = true
+ bucket.Unlock()
+ }
+}
+
+// GetBucket is a convenience function for getting a named bucket from
+// a URL
+func GetBucket(endpoint, poolname, bucketname string) (*Bucket, error) {
+ var err error
+ client, err := Connect(endpoint)
+ if err != nil {
+ return nil, err
+ }
+
+ pool, err := client.GetPool(poolname)
+ if err != nil {
+ return nil, err
+ }
+
+ return pool.GetBucket(bucketname)
+}
+
+// ConnectWithAuthAndGetBucket is a convenience function for
+// getting a named bucket from a given URL and an auth callback
+func ConnectWithAuthAndGetBucket(endpoint, poolname, bucketname string,
+ ah AuthHandler) (*Bucket, error) {
+ client, err := ConnectWithAuth(endpoint, ah)
+ if err != nil {
+ return nil, err
+ }
+
+ pool, err := client.GetPool(poolname)
+ if err != nil {
+ return nil, err
+ }
+
+ return pool.GetBucket(bucketname)
+}
diff --git a/vendor/github.com/couchbase/go-couchbase/port_map.go b/vendor/github.com/couchbase/go-couchbase/port_map.go
new file mode 100644
index 0000000..24c9f10
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/port_map.go
@@ -0,0 +1,84 @@
+package couchbase
+
+/*
+
+The goal here is to map a hostname:port combination to another hostname:port
+combination. The original hostname:port gives the name and regular KV port
+of a couchbase server. We want to determine the corresponding SSL KV port.
+
+To do this, we have a pool services structure, as obtained from
+the /pools/default/nodeServices API.
+
+For a fully configured two-node system, the structure may look like this:
+{"rev":32,"nodesExt":[
+ {"services":{"mgmt":8091,"mgmtSSL":18091,"fts":8094,"ftsSSL":18094,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"capiSSL":18092,"capi":8092,"kvSSL":11207,"projector":9999,"kv":11210,"moxi":11211},"hostname":"172.23.123.101"},
+ {"services":{"mgmt":8091,"mgmtSSL":18091,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"capiSSL":18092,"capi":8092,"kvSSL":11207,"projector":9999,"kv":11210,"moxi":11211,"n1ql":8093,"n1qlSSL":18093},"thisNode":true,"hostname":"172.23.123.102"}]}
+
+In this case, note the "hostname" fields, and the "kv" and "kvSSL" fields.
+
+For a single-node system, perhaps brought up for testing, the structure may look like this:
+{"rev":66,"nodesExt":[
+ {"services":{"mgmt":8091,"mgmtSSL":18091,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"kv":11210,"kvSSL":11207,"capi":8092,"capiSSL":18092,"projector":9999,"n1ql":8093,"n1qlSSL":18093},"thisNode":true}],"clusterCapabilitiesVer":[1,0],"clusterCapabilities":{"n1ql":["enhancedPreparedStatements"]}}
+
+Here, note that there is only a single entry in the "nodeExt" array and that it does not have a "hostname" field.
+We will assume that either hostname fields are present, or there is only a single node.
+*/
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+func ParsePoolServices(jsonInput string) (*PoolServices, error) {
+ ps := &PoolServices{}
+ err := json.Unmarshal([]byte(jsonInput), ps)
+ return ps, err
+}
+
+func MapKVtoSSL(hostport string, ps *PoolServices) (string, error) {
+ colonIndex := strings.LastIndex(hostport, ":")
+ if colonIndex < 0 {
+ return "", fmt.Errorf("Unable to find host/port separator in %s", hostport)
+ }
+ host := hostport[0:colonIndex]
+ port := hostport[colonIndex+1:]
+ portInt, err := strconv.Atoi(port)
+ if err != nil {
+ return "", fmt.Errorf("Unable to parse host/port combination %s: %v", hostport, err)
+ }
+
+ var ns *NodeServices
+ if len(ps.NodesExt) == 1 {
+ ns = &(ps.NodesExt[0])
+ } else {
+ for i := range ps.NodesExt {
+ hostname := ps.NodesExt[i].Hostname
+ if len(hostname) == 0 {
+ // in case of missing hostname, check for 127.0.0.1
+ hostname = "127.0.0.1"
+ }
+ if hostname == host {
+ ns = &(ps.NodesExt[i])
+ break
+ }
+ }
+ }
+
+ if ns == nil {
+ return "", fmt.Errorf("Unable to parse host/port combination %s: no matching node found among %d", hostport, len(ps.NodesExt))
+ }
+ kv, found := ns.Services["kv"]
+ if !found {
+ return "", fmt.Errorf("Unable to map host/port combination %s: target host has no kv port listed", hostport)
+ }
+ kvSSL, found := ns.Services["kvSSL"]
+ if !found {
+ return "", fmt.Errorf("Unable to map host/port combination %s: target host has no kvSSL port listed", hostport)
+ }
+ if portInt != kv {
+ return "", fmt.Errorf("Unable to map hostport combination %s: expected port %d but found %d", hostport, portInt, kv)
+ }
+ return fmt.Sprintf("%s:%d", host, kvSSL), nil
+}
diff --git a/vendor/github.com/couchbase/go-couchbase/streaming.go b/vendor/github.com/couchbase/go-couchbase/streaming.go
new file mode 100644
index 0000000..6e795ce
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/streaming.go
@@ -0,0 +1,215 @@
+package couchbase
+
+import (
+ "encoding/json"
+ "fmt"
+ "github.com/couchbase/goutils/logging"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "net/http"
+ "time"
+ "unsafe"
+)
+
+// Bucket auto-updater gets the latest version of the bucket config from
+// the server. If the configuration has changed then updated the local
+// bucket information. If the bucket has been deleted then notify anyone
+// who is holding a reference to this bucket
+
+const MAX_RETRY_COUNT = 5
+const DISCONNECT_PERIOD = 120 * time.Second
+
+type NotifyFn func(bucket string, err error)
+
+// Use TCP keepalive to detect half close sockets
+var updaterTransport http.RoundTripper = &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+}
+
+var updaterHTTPClient = &http.Client{Transport: updaterTransport, Timeout: 30 * time.Second}
+
+func doHTTPRequestForUpdate(req *http.Request) (*http.Response, error) {
+
+ var err error
+ var res *http.Response
+
+ for i := 0; i < HTTP_MAX_RETRY; i++ {
+ res, err = updaterHTTPClient.Do(req)
+ if err != nil && isHttpConnError(err) {
+ continue
+ }
+ break
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return res, err
+}
+
+func (b *Bucket) RunBucketUpdater(notify NotifyFn) {
+ go func() {
+ err := b.UpdateBucket()
+ if err != nil {
+ if notify != nil {
+ notify(b.GetName(), err)
+ }
+ logging.Errorf(" Bucket Updater exited with err %v", err)
+ }
+ }()
+}
+
+func (b *Bucket) replaceConnPools2(with []*connectionPool, bucketLocked bool) {
+ if !bucketLocked {
+ b.Lock()
+ defer b.Unlock()
+ }
+ old := b.connPools
+ b.connPools = unsafe.Pointer(&with)
+ if old != nil {
+ for _, pool := range *(*[]*connectionPool)(old) {
+ if pool != nil && pool.inUse == false {
+ pool.Close()
+ }
+ }
+ }
+ return
+}
+
+func (b *Bucket) UpdateBucket() error {
+
+ var failures int
+ var returnErr error
+
+ var poolServices PoolServices
+ var err error
+ tlsConfig := b.pool.client.tlsConfig
+ if tlsConfig != nil {
+ poolServices, err = b.pool.client.GetPoolServices("default")
+ if err != nil {
+ return err
+ }
+ }
+
+ for {
+
+ if failures == MAX_RETRY_COUNT {
+ logging.Errorf(" Maximum failures reached. Exiting loop...")
+ return fmt.Errorf("Max failures reached. Last Error %v", returnErr)
+ }
+
+ nodes := b.Nodes()
+ if len(nodes) < 1 {
+ return fmt.Errorf("No healthy nodes found")
+ }
+
+ startNode := rand.Intn(len(nodes))
+ node := nodes[(startNode)%len(nodes)]
+
+ streamUrl := fmt.Sprintf("http://%s/pools/default/bucketsStreaming/%s", node.Hostname, b.GetName())
+ logging.Infof(" Trying with %s", streamUrl)
+ req, err := http.NewRequest("GET", streamUrl, nil)
+ if err != nil {
+ return err
+ }
+
+ // Lock here to avoid having pool closed under us.
+ b.RLock()
+ err = maybeAddAuth(req, b.pool.client.ah)
+ b.RUnlock()
+ if err != nil {
+ return err
+ }
+
+ res, err := doHTTPRequestForUpdate(req)
+ if err != nil {
+ return err
+ }
+
+ if res.StatusCode != 200 {
+ bod, _ := ioutil.ReadAll(io.LimitReader(res.Body, 512))
+ logging.Errorf("Failed to connect to host, unexpected status code: %v. Body %s", res.StatusCode, bod)
+ res.Body.Close()
+ returnErr = fmt.Errorf("Failed to connect to host. Status %v Body %s", res.StatusCode, bod)
+ failures++
+ continue
+ }
+
+ dec := json.NewDecoder(res.Body)
+
+ tmpb := &Bucket{}
+ for {
+
+ err := dec.Decode(&tmpb)
+ if err != nil {
+ returnErr = err
+ res.Body.Close()
+ break
+ }
+
+ // if we got here, reset failure count
+ failures = 0
+ b.Lock()
+
+ // mark all the old connection pools for deletion
+ pools := b.getConnPools(true /* already locked */)
+ for _, pool := range pools {
+ if pool != nil {
+ pool.inUse = false
+ }
+ }
+
+ newcps := make([]*connectionPool, len(tmpb.VBSMJson.ServerList))
+ for i := range newcps {
+ // get the old connection pool and check if it is still valid
+ pool := b.getConnPoolByHost(tmpb.VBSMJson.ServerList[i], true /* bucket already locked */)
+ if pool != nil && pool.inUse == false {
+ // if the hostname and index is unchanged then reuse this pool
+ newcps[i] = pool
+ pool.inUse = true
+ continue
+ }
+ // else create a new pool
+ hostport := tmpb.VBSMJson.ServerList[i]
+ if tlsConfig != nil {
+ hostport, err = MapKVtoSSL(hostport, &poolServices)
+ if err != nil {
+ b.Unlock()
+ return err
+ }
+ }
+ if b.ah != nil {
+ newcps[i] = newConnectionPool(hostport,
+ b.ah, false, PoolSize, PoolOverflow, b.pool.client.tlsConfig, b.Name)
+
+ } else {
+ newcps[i] = newConnectionPool(hostport,
+ b.authHandler(true /* bucket already locked */),
+ false, PoolSize, PoolOverflow, b.pool.client.tlsConfig, b.Name)
+ }
+ }
+
+ b.replaceConnPools2(newcps, true /* bucket already locked */)
+
+ tmpb.ah = b.ah
+ b.vBucketServerMap = unsafe.Pointer(&tmpb.VBSMJson)
+ b.nodeList = unsafe.Pointer(&tmpb.NodesJSON)
+ b.Unlock()
+
+ logging.Infof("Got new configuration for bucket %s", b.GetName())
+
+ }
+ // we are here because of an error
+ failures++
+ continue
+
+ }
+ return nil
+}
diff --git a/vendor/github.com/couchbase/go-couchbase/tap.go b/vendor/github.com/couchbase/go-couchbase/tap.go
new file mode 100644
index 0000000..86edd30
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/tap.go
@@ -0,0 +1,143 @@
+package couchbase
+
+import (
+ "github.com/couchbase/gomemcached/client"
+ "github.com/couchbase/goutils/logging"
+ "sync"
+ "time"
+)
+
+const initialRetryInterval = 1 * time.Second
+const maximumRetryInterval = 30 * time.Second
+
+// A TapFeed streams mutation events from a bucket.
+//
+// Events from the bucket can be read from the channel 'C'. Remember
+// to call Close() on it when you're done, unless its channel has
+// closed itself already.
+type TapFeed struct {
+ C <-chan memcached.TapEvent
+
+ bucket *Bucket
+ args *memcached.TapArguments
+ nodeFeeds []*memcached.TapFeed // The TAP feeds of the individual nodes
+ output chan memcached.TapEvent // Same as C but writeably-typed
+ wg sync.WaitGroup
+ quit chan bool
+}
+
+// StartTapFeed creates and starts a new Tap feed
+func (b *Bucket) StartTapFeed(args *memcached.TapArguments) (*TapFeed, error) {
+ if args == nil {
+ defaultArgs := memcached.DefaultTapArguments()
+ args = &defaultArgs
+ }
+
+ feed := &TapFeed{
+ bucket: b,
+ args: args,
+ output: make(chan memcached.TapEvent, 10),
+ quit: make(chan bool),
+ }
+
+ go feed.run()
+
+ feed.C = feed.output
+ return feed, nil
+}
+
+// Goroutine that runs the feed
+func (feed *TapFeed) run() {
+ retryInterval := initialRetryInterval
+ bucketOK := true
+ for {
+ // Connect to the TAP feed of each server node:
+ if bucketOK {
+ killSwitch, err := feed.connectToNodes()
+ if err == nil {
+ // Run until one of the sub-feeds fails:
+ select {
+ case <-killSwitch:
+ case <-feed.quit:
+ return
+ }
+ feed.closeNodeFeeds()
+ retryInterval = initialRetryInterval
+ }
+ }
+
+ // On error, try to refresh the bucket in case the list of nodes changed:
+ logging.Infof("go-couchbase: TAP connection lost; reconnecting to bucket %q in %v",
+ feed.bucket.Name, retryInterval)
+ err := feed.bucket.Refresh()
+ bucketOK = err == nil
+
+ select {
+ case <-time.After(retryInterval):
+ case <-feed.quit:
+ return
+ }
+ if retryInterval *= 2; retryInterval > maximumRetryInterval {
+ retryInterval = maximumRetryInterval
+ }
+ }
+}
+
+func (feed *TapFeed) connectToNodes() (killSwitch chan bool, err error) {
+ killSwitch = make(chan bool)
+ for _, serverConn := range feed.bucket.getConnPools(false /* not already locked */) {
+ var singleFeed *memcached.TapFeed
+ singleFeed, err = serverConn.StartTapFeed(feed.args)
+ if err != nil {
+ logging.Errorf("go-couchbase: Error connecting to tap feed of %s: %v", serverConn.host, err)
+ feed.closeNodeFeeds()
+ return
+ }
+ feed.nodeFeeds = append(feed.nodeFeeds, singleFeed)
+ go feed.forwardTapEvents(singleFeed, killSwitch, serverConn.host)
+ feed.wg.Add(1)
+ }
+ return
+}
+
+// Goroutine that forwards Tap events from a single node's feed to the aggregate feed.
+func (feed *TapFeed) forwardTapEvents(singleFeed *memcached.TapFeed, killSwitch chan bool, host string) {
+ defer feed.wg.Done()
+ for {
+ select {
+ case event, ok := <-singleFeed.C:
+ if !ok {
+ if singleFeed.Error != nil {
+ logging.Errorf("go-couchbase: Tap feed from %s failed: %v", host, singleFeed.Error)
+ }
+ killSwitch <- true
+ return
+ }
+ feed.output <- event
+ case <-feed.quit:
+ return
+ }
+ }
+}
+
+func (feed *TapFeed) closeNodeFeeds() {
+ for _, f := range feed.nodeFeeds {
+ f.Close()
+ }
+ feed.nodeFeeds = nil
+}
+
+// Close a Tap feed.
+func (feed *TapFeed) Close() error {
+ select {
+ case <-feed.quit:
+ return nil
+ default:
+ }
+
+ feed.closeNodeFeeds()
+ close(feed.quit)
+ feed.wg.Wait()
+ close(feed.output)
+ return nil
+}
diff --git a/vendor/github.com/couchbase/go-couchbase/upr.go b/vendor/github.com/couchbase/go-couchbase/upr.go
new file mode 100644
index 0000000..bf1b209
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/upr.go
@@ -0,0 +1,398 @@
+package couchbase
+
+import (
+ "log"
+ "sync"
+ "time"
+
+ "fmt"
+ "github.com/couchbase/gomemcached"
+ "github.com/couchbase/gomemcached/client"
+ "github.com/couchbase/goutils/logging"
+)
+
+// A UprFeed streams mutation events from a bucket.
+//
+// Events from the bucket can be read from the channel 'C'. Remember
+// to call Close() on it when you're done, unless its channel has
+// closed itself already.
+type UprFeed struct {
+	C <-chan *memcached.UprEvent // read side of output
+
+	bucket          *Bucket                  // bucket this feed streams from
+	nodeFeeds       map[string]*FeedInfo     // The UPR feeds of the individual nodes
+	output          chan *memcached.UprEvent // Same as C but writeably-typed
+	outputClosed    bool                     // set once output has been closed
+	quit            chan bool                // closed by Close() to stop goroutines
+	name            string                   // name of this UPR feed
+	sequence        uint32                   // sequence number for this feed
+	connected       bool                     // set once the initial connect succeeded
+	killSwitch      chan bool                // signalled by a node goroutine on failure
+	closing         bool                     // set by Close() before closing quit
+	wg              sync.WaitGroup           // tracks per-node forwarding goroutines
+	dcp_buffer_size uint32                   // DCP flow-control buffer size per node feed
+	data_chan_size  int                      // capacity of output
+}
+
+// FeedInfo is the UprFeed state of a single connection (one node).
+type FeedInfo struct {
+	uprFeed   *memcached.UprFeed // UPR feed handle
+	host      string             // hostname
+	connected bool               // connected
+	quit      chan bool          // quit channel
+}
+
+// FailoverLog maps a vbucket id to that vbucket's failover log.
+type FailoverLog map[uint16]memcached.FailoverLog
+
+// GetFailoverLogs, get the failover logs for a set of vbucket ids.
+// Each vbucket is resolved to its master node; one pooled connection per
+// involved node is borrowed, queried, and closed when this call returns.
+func (b *Bucket) GetFailoverLogs(vBuckets []uint16) (FailoverLog, error) {
+
+	// map vbids to their corresponding hosts
+	vbHostList := make(map[string][]uint16)
+	vbm := b.VBServerMap()
+	if len(vbm.VBucketMap) < len(vBuckets) {
+		return nil, fmt.Errorf("vbmap smaller than vbucket list: %v vs. %v",
+			vbm.VBucketMap, vBuckets)
+	}
+
+	for _, vb := range vBuckets {
+		// BUG FIX: the aggregate length check above does not put each
+		// individual id in range; validate before indexing to avoid an
+		// index-out-of-range panic on a bad vb id.
+		if int(vb) >= len(vbm.VBucketMap) {
+			return nil, fmt.Errorf("Invalid vbucket id %d", vb)
+		}
+		masterID := vbm.VBucketMap[vb][0]
+		master := b.getMasterNode(masterID)
+		if master == "" {
+			return nil, fmt.Errorf("No master found for vb %d", vb)
+		}
+
+		// append handles the nil (first-use) case directly.
+		vbHostList[master] = append(vbHostList[master], vb)
+	}
+
+	failoverLogMap := make(FailoverLog)
+	for _, serverConn := range b.getConnPools(false /* not already locked */) {
+
+		vbList := vbHostList[serverConn.host]
+		if vbList == nil {
+			continue
+		}
+
+		mc, err := serverConn.Get()
+		if err != nil {
+			logging.Infof("No Free connections for vblist %v", vbList)
+			return nil, fmt.Errorf("No Free connections for host %s",
+				serverConn.host)
+
+		}
+		// close the connection so that it doesn't get reused for upr data
+		// connection. NOTE: deferred inside the loop, so connections stay
+		// open until this function returns — acceptable for the small
+		// number of nodes involved.
+		defer mc.Close()
+		failoverlogs, err := mc.UprGetFailoverLog(vbList)
+		if err != nil {
+			return nil, fmt.Errorf("Error getting failover log %s host %s",
+				err.Error(), serverConn.host)
+
+		}
+
+		for vb, log := range failoverlogs {
+			failoverLogMap[vb] = *log
+		}
+	}
+
+	return failoverLogMap, nil
+}
+
+// StartUprFeed creates and starts a new Upr feed with default settings:
+// a data channel of size 10 and a DCP buffer of DEFAULT_WINDOW_SIZE.
+func (b *Bucket) StartUprFeed(name string, sequence uint32) (*UprFeed, error) {
+	return b.StartUprFeedWithConfig(name, sequence, 10, DEFAULT_WINDOW_SIZE)
+}
+
+// StartUprFeedWithConfig creates and starts a new Upr feed.
+// No data will be sent on the channel unless vbuckets streams are requested.
+// data_chan_size sizes the output channel; dcp_buffer_size is the DCP
+// flow-control buffer passed to each node connection.
+func (b *Bucket) StartUprFeedWithConfig(name string, sequence uint32, data_chan_size int, dcp_buffer_size uint32) (*UprFeed, error) {
+
+	feed := &UprFeed{
+		bucket:          b,
+		output:          make(chan *memcached.UprEvent, data_chan_size),
+		quit:            make(chan bool),
+		nodeFeeds:       make(map[string]*FeedInfo, 0),
+		name:            name,
+		sequence:        sequence,
+		killSwitch:      make(chan bool),
+		dcp_buffer_size: dcp_buffer_size,
+		data_chan_size:  data_chan_size,
+	}
+
+	err := feed.connectToNodes()
+	if err != nil {
+		return nil, fmt.Errorf("Cannot connect to bucket %s", err.Error())
+	}
+	feed.connected = true
+	// run() handles reconnection with backoff for the life of the feed.
+	go feed.run()
+
+	feed.C = feed.output
+	return feed, nil
+}
+
+// UprRequestStream starts a stream for a vb on a feed.
+// The vbucket is resolved to its master node and the request is issued
+// on that node's UPR connection.
+func (feed *UprFeed) UprRequestStream(vb uint16, opaque uint16, flags uint32,
+	vuuid, startSequence, endSequence, snapStart, snapEnd uint64) error {
+
+	defer func() {
+		if r := recover(); r != nil {
+			// Re-raise with feed context attached (log.Panicf logs, then panics).
+			log.Panicf("Panic in UprRequestStream. Feed %v Bucket %v", feed, feed.bucket)
+		}
+	}()
+
+	vbm := feed.bucket.VBServerMap()
+	// A single bounds check suffices; the original had a second,
+	// overlapping "len < vb" check that this one subsumes.
+	if int(vb) >= len(vbm.VBucketMap) {
+		return fmt.Errorf("Invalid vbucket id %d", vb)
+	}
+
+	masterID := vbm.VBucketMap[vb][0]
+	master := feed.bucket.getMasterNode(masterID)
+	if master == "" {
+		return fmt.Errorf("Master node not found for vbucket %d", vb)
+	}
+	singleFeed := feed.nodeFeeds[master]
+	if singleFeed == nil {
+		return fmt.Errorf("UprFeed for this host not found")
+	}
+
+	return singleFeed.uprFeed.UprRequestStream(vb, opaque, flags,
+		vuuid, startSequence, endSequence, snapStart, snapEnd)
+}
+
+// UprCloseStream ends a vbucket stream.
+// Mirrors UprRequestStream: resolves the vbucket's master node and
+// issues CloseStream on that node's UPR connection.
+func (feed *UprFeed) UprCloseStream(vb, opaqueMSB uint16) error {
+
+	defer func() {
+		if r := recover(); r != nil {
+			// Re-raise with feed context attached (log.Panicf logs, then panics).
+			log.Panicf("Panic in UprCloseStream. Feed %v Bucket %v ", feed, feed.bucket)
+		}
+	}()
+
+	vbm := feed.bucket.VBServerMap()
+	// NOTE(review): this first check is subsumed by the ">= len" check
+	// below; it only changes which error message is returned for vb > len.
+	if len(vbm.VBucketMap) < int(vb) {
+		return fmt.Errorf("vbmap smaller than vbucket list: %v vs. %v",
+			vb, vbm.VBucketMap)
+	}
+
+	if int(vb) >= len(vbm.VBucketMap) {
+		return fmt.Errorf("Invalid vbucket id %d", vb)
+	}
+
+	masterID := vbm.VBucketMap[vb][0]
+	master := feed.bucket.getMasterNode(masterID)
+	if master == "" {
+		return fmt.Errorf("Master node not found for vbucket %d", vb)
+	}
+	singleFeed := feed.nodeFeeds[master]
+	if singleFeed == nil {
+		return fmt.Errorf("UprFeed for this host not found")
+	}
+
+	if err := singleFeed.uprFeed.CloseStream(vb, opaqueMSB); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Goroutine that runs the feed: waits for a sub-feed failure, then
+// refreshes the bucket and reconnects, with exponential backoff capped
+// at maximumRetryInterval. Exits when the feed is closed or when the
+// bucket can no longer be refreshed/reconnected.
+func (feed *UprFeed) run() {
+	retryInterval := initialRetryInterval
+	bucketOK := true
+	for {
+		// Connect to the UPR feed of each server node:
+		if bucketOK {
+			// Run until one of the sub-feeds fails:
+			select {
+			case <-feed.killSwitch:
+			case <-feed.quit:
+				return
+			}
+			//feed.closeNodeFeeds()
+			retryInterval = initialRetryInterval
+		}
+
+		if feed.closing {
+			// we have been asked to shut down
+			return
+		}
+
+		// On error, try to refresh the bucket in case the list of nodes changed:
+		logging.Infof("go-couchbase: UPR connection lost; reconnecting to bucket %q in %v",
+			feed.bucket.Name, retryInterval)
+
+		if err := feed.bucket.Refresh(); err != nil {
+			// if we fail to refresh the bucket, exit the feed
+			// MB-14917
+			logging.Infof("Unable to refresh bucket %s ", err.Error())
+			close(feed.output)
+			feed.outputClosed = true
+			feed.closeNodeFeeds()
+			return
+		}
+
+		// this will only connect to nodes that are not connected or changed
+		// user will have to reconnect the stream
+		if err := feed.connectToNodes(); err != nil {
+			logging.Infof("Unable to connect to nodes..exit ")
+			close(feed.output)
+			feed.outputClosed = true
+			feed.closeNodeFeeds()
+			return
+		}
+		// Reconnect succeeded. (The original `bucketOK = err == nil` was
+		// always true here because the error path returns above.)
+		bucketOK = true
+
+		select {
+		case <-time.After(retryInterval):
+		case <-feed.quit:
+			return
+		}
+		if retryInterval *= 2; retryInterval > maximumRetryInterval {
+			retryInterval = maximumRetryInterval
+		}
+	}
+}
+
+// connectToNodes (re)establishes a UPR feed to every node in the
+// bucket's pool list that is not already connected, starting one
+// forwarding goroutine per new connection. Returns an error when no
+// node at all could be connected.
+func (feed *UprFeed) connectToNodes() (err error) {
+	nodeCount := 0
+	for _, serverConn := range feed.bucket.getConnPools(false /* not already locked */) {
+
+		// this maybe a reconnection, so check if the connection to the node
+		// already exists. Connect only if the node is not found in the list
+		// or connected == false
+		nodeFeed := feed.nodeFeeds[serverConn.host]
+
+		if nodeFeed != nil && nodeFeed.connected {
+			continue
+		}
+
+		name := feed.name
+		if name == "" {
+			name = "DefaultUprClient"
+		}
+		var singleFeed *memcached.UprFeed
+		singleFeed, err = serverConn.StartUprFeed(name, feed.sequence, feed.dcp_buffer_size, feed.data_chan_size)
+		if err != nil {
+			logging.Errorf("go-couchbase: Error connecting to upr feed of %s: %v", serverConn.host, err)
+			feed.closeNodeFeeds()
+			return
+		}
+		// add the node to the connection map
+		feedInfo := &FeedInfo{
+			uprFeed:   singleFeed,
+			connected: true,
+			host:      serverConn.host,
+			quit:      make(chan bool),
+		}
+		feed.nodeFeeds[serverConn.host] = feedInfo
+		// BUG FIX: Add must run BEFORE the goroutine starts; in the
+		// original order a fast goroutine could call Done before Add,
+		// racing a concurrent Wait.
+		feed.wg.Add(1)
+		go feed.forwardUprEvents(feedInfo, feed.killSwitch, serverConn.host)
+		nodeCount++
+	}
+	if nodeCount == 0 {
+		return fmt.Errorf("No connection to bucket")
+	}
+
+	return nil
+}
+
+// Goroutine that forwards Upr events from a single node's feed to the aggregate feed.
+// Exits when its per-node quit channel closes, when the node channel
+// closes (signalling killSwitch), or when the aggregate output is closed.
+func (feed *UprFeed) forwardUprEvents(nodeFeed *FeedInfo, killSwitch chan bool, host string) {
+	singleFeed := nodeFeed.uprFeed
+
+	defer func() {
+		feed.wg.Done()
+		if r := recover(); r != nil {
+			// A panic here is most likely a send on the closed output
+			// channel during shutdown; swallow it only in that case.
+			//if feed is not closing, re-throw the panic
+			if feed.outputClosed != true && feed.closing != true {
+				panic(r)
+			} else {
+				logging.Errorf("Panic is recovered. Since feed is closed, exit gracefully")
+
+			}
+		}
+	}()
+
+	for {
+		select {
+		case <-nodeFeed.quit:
+			nodeFeed.connected = false
+			return
+
+		case event, ok := <-singleFeed.C:
+			if !ok {
+				if singleFeed.Error != nil {
+					logging.Errorf("go-couchbase: Upr feed from %s failed: %v", host, singleFeed.Error)
+				}
+				// Trip the kill switch so run() attempts a reconnect.
+				killSwitch <- true
+				return
+			}
+			if feed.outputClosed == true {
+				// someone closed the node feed
+				logging.Infof("Node need closed, returning from forwardUprEvent")
+				return
+			}
+			feed.output <- event
+			if event.Status == gomemcached.NOT_MY_VBUCKET {
+				// Topology changed under us: refresh and reconnect; the
+				// caller must re-request any affected streams.
+				logging.Infof(" Got a not my vbucket error !! ")
+				if err := feed.bucket.Refresh(); err != nil {
+					logging.Errorf("Unable to refresh bucket %s ", err.Error())
+					feed.closeNodeFeeds()
+					return
+				}
+				// this will only connect to nodes that are not connected or changed
+				// user will have to reconnect the stream
+				if err := feed.connectToNodes(); err != nil {
+					logging.Errorf("Unable to connect to nodes %s", err.Error())
+					return
+				}
+
+			}
+		}
+	}
+}
+
+// closeNodeFeeds signals every per-node goroutine to stop (via its quit
+// channel), closes each node's UPR connection, and clears the map.
+func (feed *UprFeed) closeNodeFeeds() {
+	for _, f := range feed.nodeFeeds {
+		logging.Infof(" Sending close to forwardUprEvent ")
+		close(f.quit)
+		f.uprFeed.Close()
+	}
+	feed.nodeFeeds = nil
+}
+
+// Close a Upr feed.
+// Idempotent: once feed.quit has been closed, further calls return nil.
+func (feed *UprFeed) Close() error {
+	// Non-blocking probe: if quit is already closed, Close already ran.
+	select {
+	case <-feed.quit:
+		return nil
+	default:
+	}
+
+	// Mark closing first so forwarding goroutines treat late panics as
+	// part of shutdown (see forwardUprEvents' recover).
+	feed.closing = true
+	feed.closeNodeFeeds()
+	close(feed.quit)
+
+	// Wait for all forwarders to exit before closing output, so none of
+	// them writes to a closed channel.
+	feed.wg.Wait()
+	if feed.outputClosed == false {
+		feed.outputClosed = true
+		close(feed.output)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/couchbase/go-couchbase/users.go b/vendor/github.com/couchbase/go-couchbase/users.go
new file mode 100644
index 0000000..47d4861
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/users.go
@@ -0,0 +1,119 @@
+package couchbase
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// User describes an RBAC user as returned by /settings/rbac/users.
+type User struct {
+	Name   string // display name
+	Id     string // user id
+	Domain string // authentication domain, e.g. "local" or "external"
+	Roles  []Role // roles granted to this user
+}
+
+// Role is a single role grant, optionally scoped to one bucket.
+type Role struct {
+	Role       string
+	BucketName string `json:"bucket_name"`
+}
+
+// RoleDescription describes one assignable role, from /settings/rbac/roles.
+// Sample:
+// {"role":"admin","name":"Admin","desc":"Can manage ALL cluster features including security.","ce":true}
+// {"role":"query_select","bucket_name":"*","name":"Query Select","desc":"Can execute SELECT statement on bucket to retrieve data"}
+type RoleDescription struct {
+	Role       string
+	Name       string
+	Desc       string
+	Ce         bool
+	BucketName string `json:"bucket_name"`
+}
+
+// GetUserRoles returns user-role data, as parsed JSON.
+// Sample:
+// [{"id":"ivanivanov","name":"Ivan Ivanov","roles":[{"role":"cluster_admin"},{"bucket_name":"default","role":"bucket_admin"}]},
+//  {"id":"petrpetrov","name":"Petr Petrov","roles":[{"role":"replication_admin"}]}]
+// The configured administrator (who is not listed by the RBAC endpoint)
+// is appended as a synthetic entry with the "admin" role.
+func (c *Client) GetUserRoles() ([]interface{}, error) {
+	ret := make([]interface{}, 0, 1)
+	err := c.parseURLResponse("/settings/rbac/users", &ret)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get the configured administrator.
+	// Expected result: {"port":8091,"username":"Administrator"}
+	adminInfo := make(map[string]interface{}, 2)
+	err = c.parseURLResponse("/settings/web", &adminInfo)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create a special entry for the configured administrator.
+	adminResult := map[string]interface{}{
+		"name":   adminInfo["username"],
+		"id":     adminInfo["username"],
+		"domain": "ns_server",
+		"roles": []interface{}{
+			map[string]interface{}{
+				"role": "admin",
+			},
+		},
+	}
+
+	// Add the configured administrator to the list of results.
+	ret = append(ret, adminResult)
+
+	return ret, nil
+}
+
+// GetUserInfoAll fetches all RBAC users as typed User values.
+func (c *Client) GetUserInfoAll() ([]User, error) {
+	ret := make([]User, 0, 16)
+	err := c.parseURLResponse("/settings/rbac/users", &ret)
+	if err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// rolesToParamFormat serializes a role list into the comma-separated
+// "role" / "role[bucket]" form expected by the /settings/rbac endpoints.
+func rolesToParamFormat(roles []Role) string {
+	var out bytes.Buffer
+	sep := ""
+	for _, r := range roles {
+		out.WriteString(sep)
+		sep = ","
+		out.WriteString(r.Role)
+		if r.BucketName != "" {
+			out.WriteString("[" + r.BucketName + "]")
+		}
+	}
+	return out.String()
+}
+
+// PutUserInfo creates or updates an RBAC user via a PUT to the
+// domain-specific /settings/rbac/users endpoint. Returns an error for
+// any Domain other than "local" or "external".
+func (c *Client) PutUserInfo(u *User) error {
+	params := map[string]interface{}{
+		"name":  u.Name,
+		"roles": rolesToParamFormat(u.Roles),
+	}
+	var target string
+	switch u.Domain {
+	case "external":
+		target = "/settings/rbac/users/" + u.Id
+	case "local":
+		target = "/settings/rbac/users/local/" + u.Id
+	default:
+		return fmt.Errorf("Unknown user type: %s", u.Domain)
+	}
+	var ret string // PUT returns an empty string. We ignore it.
+	err := c.parsePutURLResponse(target, params, &ret)
+	return err
+}
+
+// GetRolesAll fetches the descriptions of every assignable role.
+func (c *Client) GetRolesAll() ([]RoleDescription, error) {
+	ret := make([]RoleDescription, 0, 32)
+	err := c.parseURLResponse("/settings/rbac/roles", &ret)
+	if err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
diff --git a/vendor/github.com/couchbase/go-couchbase/util.go b/vendor/github.com/couchbase/go-couchbase/util.go
new file mode 100644
index 0000000..4d286a3
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/util.go
@@ -0,0 +1,49 @@
+package couchbase
+
+import (
+ "fmt"
+ "net/url"
+ "strings"
+)
+
+// CleanupHost returns the hostname with the given suffix removed.
+func CleanupHost(h, commonSuffix string) string {
+ if strings.HasSuffix(h, commonSuffix) {
+ return h[:len(h)-len(commonSuffix)]
+ }
+ return h
+}
+
+// FindCommonSuffix returns the longest common suffix from the given
+// strings, or "" when fewer than two strings are supplied.
+func FindCommonSuffix(input []string) string {
+	rv := ""
+	if len(input) < 2 {
+		return ""
+	}
+	// Try suffixes of input[0] from shortest to longest. Once a suffix
+	// is not shared by every string no longer suffix can be (a longer
+	// suffix ends with the shorter one), so stop early — the original
+	// kept scanning uselessly.
+	for i := len(input[0]); i > 0; i-- {
+		suffix := input[0][i:]
+		common := true
+		for _, s := range input {
+			if !strings.HasSuffix(s, suffix) {
+				common = false
+				break
+			}
+		}
+		if !common {
+			break
+		}
+		rv = suffix
+	}
+	return rv
+}
+
+// ParseURL is a wrapper around url.Parse with some sanity-checking:
+// a URL that parses but has no scheme (e.g. "host:8091") is rejected
+// rather than being silently mis-interpreted.
+func ParseURL(urlStr string) (result *url.URL, err error) {
+	result, err = url.Parse(urlStr)
+	if result != nil && result.Scheme == "" {
+		result = nil
+		err = fmt.Errorf("invalid URL <%s>", urlStr)
+	}
+	return
+}
diff --git a/vendor/github.com/couchbase/go-couchbase/vbmap.go b/vendor/github.com/couchbase/go-couchbase/vbmap.go
new file mode 100644
index 0000000..b96a18e
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/vbmap.go
@@ -0,0 +1,77 @@
+package couchbase
+
+var crc32tab = []uint32{
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
+ 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
+ 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
+ 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
+ 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
+ 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
+ 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
+ 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
+ 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
+ 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
+ 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
+ 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
+ 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
+ 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
+ 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
+ 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
+ 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
+ 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
+ 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
+ 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
+ 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
+ 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
+ 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
+ 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
+ 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
+ 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
+ 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
+ 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
+ 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
+ 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
+ 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
+ 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
+ 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
+ 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
+ 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
+ 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
+ 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
+ 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
+ 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
+ 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
+ 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
+ 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
+ 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d}
+
+// VBHash finds the vbucket for the given key.
+// Computes a CRC32 of the key (table above), then masks it into the
+// vbucket range. NOTE(review): the final `& (len-1)` assumes the number
+// of vbuckets is a power of two — confirm for non-default deployments.
+func (b *Bucket) VBHash(key string) uint32 {
+	crc := uint32(0xffffffff)
+	for x := 0; x < len(key); x++ {
+		crc = (crc >> 8) ^ crc32tab[(uint64(crc)^uint64(key[x]))&0xff]
+	}
+	vbm := b.VBServerMap()
+	return ((^crc) >> 16) & 0x7fff & (uint32(len(vbm.VBucketMap)) - 1)
+}
diff --git a/vendor/github.com/couchbase/go-couchbase/views.go b/vendor/github.com/couchbase/go-couchbase/views.go
new file mode 100644
index 0000000..2f68642
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/views.go
@@ -0,0 +1,231 @@
+package couchbase
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "math/rand"
+ "net/http"
+ "net/url"
+ "time"
+)
+
+// ViewRow represents a single result from a view.
+//
+// Doc is present only if include_docs was set on the request.
+type ViewRow struct {
+	ID    string
+	Key   interface{}
+	Value interface{}
+	Doc   *interface{}
+}
+
+// A ViewError is a node-specific error indicating a partial failure
+// within a view result.
+type ViewError struct {
+	From   string
+	Reason string
+}
+
+// Error satisfies the error interface.
+func (ve ViewError) Error() string {
+	return "Node: " + ve.From + ", reason: " + ve.Reason
+}
+
+// ViewResult holds the entire result set from a view request,
+// including the rows and the errors.
+type ViewResult struct {
+	TotalRows int `json:"total_rows"`
+	Rows      []ViewRow
+	Errors    []ViewError
+}
+
+// randomBaseURL picks a random healthy node and returns its Couch API
+// base URL, with the pool's credentials attached when available.
+func (b *Bucket) randomBaseURL() (*url.URL, error) {
+	nodes := b.HealthyNodes()
+	if len(nodes) == 0 {
+		return nil, errors.New("no available couch rest URLs")
+	}
+	nodeNo := rand.Intn(len(nodes))
+	node := nodes[nodeNo]
+
+	// Snapshot name/pool under the read lock.
+	b.RLock()
+	name := b.Name
+	pool := b.pool
+	b.RUnlock()
+
+	u, err := ParseURL(node.CouchAPIBase)
+	if err != nil {
+		return nil, fmt.Errorf("config error: Bucket %q node #%d CouchAPIBase=%q: %v",
+			name, nodeNo, node.CouchAPIBase, err)
+	} else if pool != nil {
+		u.User = pool.client.BaseURL.User
+	}
+	return u, err
+}
+
+// START_NODE_ID is the sentinel lastNode value meaning "no previous
+// node; pick one at random".
+const START_NODE_ID = -1
+
+// randomNextURL returns the Couch API URL of the node after lastNode
+// (wrapping around), or of a random node when lastNode is START_NODE_ID
+// or out of range. It also returns the chosen node's index so callers
+// can iterate.
+func (b *Bucket) randomNextURL(lastNode int) (*url.URL, int, error) {
+	nodes := b.HealthyNodes()
+	if len(nodes) == 0 {
+		return nil, -1, errors.New("no available couch rest URLs")
+	}
+
+	var nodeNo int
+	if lastNode == START_NODE_ID || lastNode >= len(nodes) {
+		// randomly select a node if the value of lastNode is invalid
+		nodeNo = rand.Intn(len(nodes))
+	} else {
+		// wrap around the node list
+		nodeNo = (lastNode + 1) % len(nodes)
+	}
+
+	// Snapshot name/pool under the read lock.
+	b.RLock()
+	name := b.Name
+	pool := b.pool
+	b.RUnlock()
+
+	node := nodes[nodeNo]
+	u, err := ParseURL(node.CouchAPIBase)
+	if err != nil {
+		return nil, -1, fmt.Errorf("config error: Bucket %q node #%d CouchAPIBase=%q: %v",
+			name, nodeNo, node.CouchAPIBase, err)
+	} else if pool != nil {
+		u.User = pool.client.BaseURL.User
+	}
+	return u, nodeNo, err
+}
+
+// DocID is the document ID type for the startkey_docid parameter in
+// views.
+type DocID string
+
+// qParam renders one view query-parameter value: most string values are
+// JSON-quoted, but a few keys take their value verbatim.
+func qParam(k, v string) string {
+	switch k {
+	case "startkey_docid", "endkey_docid", "stale":
+		// These parameters are passed through unquoted.
+		return v
+	}
+	return fmt.Sprintf(`"%s"`, v)
+}
+
+// ViewURL constructs a URL for a view with the given ddoc, view name,
+// and parameters, against a randomly chosen healthy node.
+// String/int/bool parameters are formatted directly; anything else is
+// JSON-marshaled. An empty ddoc with name "_all_docs" targets the
+// bucket's _all_docs endpoint instead of a design document.
+func (b *Bucket) ViewURL(ddoc, name string,
+	params map[string]interface{}) (string, error) {
+	u, err := b.randomBaseURL()
+	if err != nil {
+		return "", err
+	}
+
+	values := url.Values{}
+	for k, v := range params {
+		switch t := v.(type) {
+		case DocID:
+			values[k] = []string{string(t)}
+		case string:
+			values[k] = []string{qParam(k, t)}
+		case int:
+			values[k] = []string{fmt.Sprintf(`%d`, t)}
+		case bool:
+			values[k] = []string{fmt.Sprintf(`%v`, t)}
+		default:
+			b, err := json.Marshal(v)
+			if err != nil {
+				return "", fmt.Errorf("unsupported value-type %T in Query, "+
+					"json encoder said %v", t, err)
+			}
+			values[k] = []string{fmt.Sprintf(`%v`, string(b))}
+		}
+	}
+
+	if ddoc == "" && name == "_all_docs" {
+		u.Path = fmt.Sprintf("/%s/_all_docs", b.GetName())
+	} else {
+		u.Path = fmt.Sprintf("/%s/_design/%s/_view/%s", b.GetName(), ddoc, name)
+	}
+	u.RawQuery = values.Encode()
+
+	return u.String(), nil
+}
+
+// ViewCallback is called for each view invocation.
+var ViewCallback func(ddoc, name string, start time.Time, err error)
+
+// ViewCustom performs a view request that can map row values to a
+// custom type.
+//
+// See the source to View for an example usage.
+func (b *Bucket) ViewCustom(ddoc, name string, params map[string]interface{},
+	vres interface{}) (err error) {
+	if SlowServerCallWarningThreshold > 0 {
+		defer slowLog(time.Now(), "call to ViewCustom(%q, %q)", ddoc, name)
+	}
+
+	if ViewCallback != nil {
+		defer func(t time.Time) { ViewCallback(ddoc, name, t, err) }(time.Now())
+	}
+
+	u, err := b.ViewURL(ddoc, name, params)
+	if err != nil {
+		return err
+	}
+
+	req, err := http.NewRequest("GET", u, nil)
+	if err != nil {
+		return err
+	}
+
+	ah := b.authHandler(false /* bucket not yet locked */)
+	maybeAddAuth(req, ah)
+
+	res, err := doHTTPRequest(req)
+	if err != nil {
+		return fmt.Errorf("error starting view req at %v: %v", u, err)
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode != 200 {
+		bod := make([]byte, 512)
+		l, _ := res.Body.Read(bod)
+		return fmt.Errorf("error executing view req at %v: %v - %s",
+			u, res.Status, bod[:l])
+	}
+
+	body, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		// BUG FIX: the read error was previously ignored entirely.
+		return fmt.Errorf("error reading view response at %v: %v", u, err)
+	}
+	if err := json.Unmarshal(body, vres); err != nil {
+		// BUG FIX: the original returned nil (success) on a parse error,
+		// silently handing the caller a partially-filled result.
+		return fmt.Errorf("error parsing view response at %v: %v", u, err)
+	}
+
+	return nil
+}
+
+// View executes a view.
+//
+// The ddoc parameter is just the bare name of your design doc without
+// the "_design/" prefix.
+//
+// Parameters are string keys with values that correspond to couchbase
+// view parameters. Primitives should work fairly naturally (booleans,
+// ints, strings, etc...) and other values will attempt to be JSON
+// marshaled (useful for array indexing on view keys, for example).
+//
+// Example:
+//
+//   res, err := couchbase.View("myddoc", "myview", map[string]interface{}{
+//       "group_level": 2,
+//       "startkey_docid": []interface{}{"thing"},
+//       "endkey_docid": []interface{}{"thing", map[string]string{}},
+//       "stale": false,
+//   })
+func (b *Bucket) View(ddoc, name string, params map[string]interface{}) (ViewResult, error) {
+	vres := ViewResult{}
+	if err := b.ViewCustom(ddoc, name, params, &vres); err != nil {
+		// Error accessing views: refresh the bucket once (the node list
+		// may have changed) and retry. The Refresh error is deliberately
+		// ignored; a persistent failure surfaces via the retry itself.
+		b.Refresh()
+		return vres, b.ViewCustom(ddoc, name, params, &vres)
+	}
+	return vres, nil
+}
diff --git a/vendor/github.com/couchbase/gomemcached/.gitignore b/vendor/github.com/couchbase/gomemcached/.gitignore
new file mode 100644
index 0000000..f75d85a
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/.gitignore
@@ -0,0 +1,6 @@
+#*
+*.[68]
+*~
+*.swp
+/gocache/gocache
+c.out
diff --git a/vendor/github.com/couchbase/gomemcached/LICENSE b/vendor/github.com/couchbase/gomemcached/LICENSE
new file mode 100644
index 0000000..b01ef80
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2013 Dustin Sallings
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/couchbase/gomemcached/README.markdown b/vendor/github.com/couchbase/gomemcached/README.markdown
new file mode 100644
index 0000000..5e9b2de
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/README.markdown
@@ -0,0 +1,32 @@
+# gomemcached
+
+This is a memcached binary protocol toolkit in [go][go].
+
+It provides client and server functionality as well as a little sample
+server showing how I might make a server if I valued purity over
+performance.
+
+## Server Design
+
+
+
+
+
+The basic design can be seen in [gocache]. A [storage
+server][storage] is run as a goroutine that receives a `MCRequest` on
+a channel, and then issues an `MCResponse` to a channel contained
+within the request.
+
+Each connection is a separate goroutine, of course, and is responsible
+for all IO for that connection until the connection drops or the
+`dataServer` decides it's stupid and sends a fatal response back over
+the channel.
+
+There is currently no work at all in making the thing perform (there
+are specific areas I know need work). This is just my attempt to
+learn the language somewhat.
+
+[go]: http://golang.org/
+[gocache]: gomemcached/blob/master/gocache/gocache.go
+[storage]: gomemcached/blob/master/gocache/mc_storage.go
diff --git a/vendor/github.com/couchbase/gomemcached/client/mc.go b/vendor/github.com/couchbase/gomemcached/client/mc.go
new file mode 100644
index 0000000..0f1d61e
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/client/mc.go
@@ -0,0 +1,1140 @@
+// Package memcached provides a memcached binary protocol client.
+package memcached
+
+import (
+ "crypto/tls"
+ "encoding/binary"
+ "fmt"
+ "github.com/couchbase/gomemcached"
+ "github.com/couchbase/goutils/logging"
+ "github.com/couchbase/goutils/scramsha"
+ "github.com/pkg/errors"
+ "io"
+ "math"
+ "net"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
// ClientIface is the full memcached client API. *Client implements it;
// the interface exists so callers can wrap or mock a connection.
type ClientIface interface {
	Add(vb uint16, key string, flags int, exp int, body []byte) (*gomemcached.MCResponse, error)
	Append(vb uint16, key string, data []byte) (*gomemcached.MCResponse, error)
	Auth(user, pass string) (*gomemcached.MCResponse, error)
	AuthList() (*gomemcached.MCResponse, error)
	AuthPlain(user, pass string) (*gomemcached.MCResponse, error)
	AuthScramSha(user, pass string) (*gomemcached.MCResponse, error)
	CASNext(vb uint16, k string, exp int, state *CASState) bool
	CAS(vb uint16, k string, f CasFunc, initexp int) (*gomemcached.MCResponse, error)
	CollectionsGetCID(scope string, collection string) (*gomemcached.MCResponse, error)
	Close() error
	Decr(vb uint16, key string, amt, def uint64, exp int) (uint64, error)
	Del(vb uint16, key string) (*gomemcached.MCResponse, error)
	EnableMutationToken() (*gomemcached.MCResponse, error)
	Get(vb uint16, key string) (*gomemcached.MCResponse, error)
	GetCollectionsManifest() (*gomemcached.MCResponse, error)
	GetFromCollection(vb uint16, cid uint32, key string) (*gomemcached.MCResponse, error)
	GetSubdoc(vb uint16, key string, subPaths []string) (*gomemcached.MCResponse, error)
	GetAndTouch(vb uint16, key string, exp int) (*gomemcached.MCResponse, error)
	GetBulk(vb uint16, keys []string, rv map[string]*gomemcached.MCResponse, subPaths []string) error
	GetMeta(vb uint16, key string) (*gomemcached.MCResponse, error)
	GetRandomDoc() (*gomemcached.MCResponse, error)
	Hijack() io.ReadWriteCloser
	Incr(vb uint16, key string, amt, def uint64, exp int) (uint64, error)
	Observe(vb uint16, key string) (result ObserveResult, err error)
	ObserveSeq(vb uint16, vbuuid uint64) (result *ObserveSeqResult, err error)
	Receive() (*gomemcached.MCResponse, error)
	ReceiveWithDeadline(deadline time.Time) (*gomemcached.MCResponse, error)
	Send(req *gomemcached.MCRequest) (rv *gomemcached.MCResponse, err error)
	Set(vb uint16, key string, flags int, exp int, body []byte) (*gomemcached.MCResponse, error)
	SetKeepAliveOptions(interval time.Duration)
	SetReadDeadline(t time.Time)
	SetDeadline(t time.Time)
	SelectBucket(bucket string) (*gomemcached.MCResponse, error)
	SetCas(vb uint16, key string, flags int, exp int, cas uint64, body []byte) (*gomemcached.MCResponse, error)
	Stats(key string) ([]StatValue, error)
	StatsMap(key string) (map[string]string, error)
	StatsMapForSpecifiedStats(key string, statsMap map[string]string) error
	Transmit(req *gomemcached.MCRequest) error
	TransmitWithDeadline(req *gomemcached.MCRequest, deadline time.Time) error
	TransmitResponse(res *gomemcached.MCResponse) error

	// UprFeed Related
	NewUprFeed() (*UprFeed, error)
	NewUprFeedIface() (UprFeedIface, error)
	NewUprFeedWithConfig(ackByClient bool) (*UprFeed, error)
	NewUprFeedWithConfigIface(ackByClient bool) (UprFeedIface, error)
	UprGetFailoverLog(vb []uint16) (map[uint16]*FailoverLog, error)
}
+
// bufsize is the default I/O buffer size used elsewhere in this package.
const bufsize = 1024

// Health states stored in Client.healthy (uint32 for atomic access).
var UnHealthy uint32 = 0
var Healthy uint32 = 1

// Feature is a HELLO feature code; Features is the set negotiated at once.
type Features []Feature
type Feature uint16

// HELLO feature codes from the memcached binary protocol.
const FeatureMutationToken = Feature(0x04)
const FeatureXattr = Feature(0x06)
const FeatureCollections = Feature(0x12)
const FeatureDataType = Feature(0x0b)
+
// memcachedConnection is the minimal transport the client needs: a
// read/write/close stream that also supports deadlines (satisfied by
// net.Conn and *tls.Conn).
type memcachedConnection interface {
	io.ReadWriteCloser

	SetReadDeadline(time.Time) error
	SetDeadline(time.Time) error
}
+
+// The Client itself.
+type Client struct {
+ conn memcachedConnection
+ // use uint32 type so that it can be accessed through atomic APIs
+ healthy uint32
+ opaque uint32
+
+ hdrBuf []byte
+}
+
var (
	DefaultDialTimeout = time.Duration(0) // No timeout

	DefaultWriteTimeout = time.Duration(0) // No timeout

	// dialFun is indirected so tests can substitute a fake dialer.
	dialFun = func(prot, dest string) (net.Conn, error) {
		return net.DialTimeout(prot, dest, DefaultDialTimeout)
	}
)
+
+// Connect to a memcached server.
+func Connect(prot, dest string) (rv *Client, err error) {
+ conn, err := dialFun(prot, dest)
+ if err != nil {
+ return nil, err
+ }
+ return Wrap(conn)
+}
+
+// Connect to a memcached server using TLS.
+func ConnectTLS(prot, dest string, config *tls.Config) (rv *Client, err error) {
+ conn, err := tls.Dial(prot, dest, config)
+ if err != nil {
+ return nil, err
+ }
+ return Wrap(conn)
+}
+
// SetDefaultTimeouts sets the package-level dial and write timeouts.
// NOTE(review): the read parameter is accepted but never used — there is
// no package-level read timeout; confirm whether this is intentional.
func SetDefaultTimeouts(dial, read, write time.Duration) {
	DefaultDialTimeout = dial
	DefaultWriteTimeout = write
}
+
// SetDefaultDialTimeout sets only the package-level dial timeout.
func SetDefaultDialTimeout(dial time.Duration) {
	DefaultDialTimeout = dial
}
+
+func (c *Client) SetKeepAliveOptions(interval time.Duration) {
+ tcpConn, ok := c.conn.(*net.TCPConn)
+ if ok {
+ tcpConn.SetKeepAlive(true)
+ tcpConn.SetKeepAlivePeriod(interval)
+ }
+}
+
// SetReadDeadline sets the read deadline on the underlying connection.
func (c *Client) SetReadDeadline(t time.Time) {
	c.conn.SetReadDeadline(t)
}
+
// SetDeadline sets both read and write deadlines on the underlying connection.
func (c *Client) SetDeadline(t time.Time) {
	c.conn.SetDeadline(t)
}
+
+// Wrap an existing transport.
+func Wrap(conn memcachedConnection) (rv *Client, err error) {
+ client := &Client{
+ conn: conn,
+ hdrBuf: make([]byte, gomemcached.HDR_LEN),
+ opaque: uint32(1),
+ }
+ client.setHealthy(true)
+ return client, nil
+}
+
// Close the connection when you're done.
func (c *Client) Close() error {
	return c.conn.Close()
}
+
+// IsHealthy returns true unless the client is belived to have
+// difficulty communicating to its server.
+//
+// This is useful for connection pools where we want to
+// non-destructively determine that a connection may be reused.
+func (c Client) IsHealthy() bool {
+ healthyState := atomic.LoadUint32(&c.healthy)
+ return healthyState == Healthy
+}
+
// Send a custom request and get the response.
// The client is marked unhealthy when the receive fails with a fatal
// (connection-level) error.
func (c *Client) Send(req *gomemcached.MCRequest) (rv *gomemcached.MCResponse, err error) {
	err = c.Transmit(req)
	if err != nil {
		return
	}
	resp, _, err := getResponse(c.conn, c.hdrBuf)
	c.setHealthy(!gomemcached.IsFatal(err))
	return resp, err
}
+
// Transmit send a request, but does not wait for a response.
// When DefaultWriteTimeout is set, a write deadline is applied for just
// this write and cleared afterwards.
func (c *Client) Transmit(req *gomemcached.MCRequest) error {
	if DefaultWriteTimeout > 0 {
		c.conn.(net.Conn).SetWriteDeadline(time.Now().Add(DefaultWriteTimeout))
	}
	_, err := transmitRequest(c.conn, req)
	// clear write deadline to avoid interference with future write operations
	if DefaultWriteTimeout > 0 {
		c.conn.(net.Conn).SetWriteDeadline(time.Time{})
	}
	if err != nil {
		c.setHealthy(false)
	}
	return err
}
+
// TransmitWithDeadline sends a request with an explicit write deadline
// (cleared after the write) and does not wait for a response.
func (c *Client) TransmitWithDeadline(req *gomemcached.MCRequest, deadline time.Time) error {
	c.conn.(net.Conn).SetWriteDeadline(deadline)

	_, err := transmitRequest(c.conn, req)

	// clear write deadline to avoid interference with future write operations
	c.conn.(net.Conn).SetWriteDeadline(time.Time{})

	if err != nil {
		c.setHealthy(false)
	}
	return err
}
+
// TransmitResponse send a response, does not wait.
// Mirrors Transmit but for server-side responses.
func (c *Client) TransmitResponse(res *gomemcached.MCResponse) error {
	if DefaultWriteTimeout > 0 {
		c.conn.(net.Conn).SetWriteDeadline(time.Now().Add(DefaultWriteTimeout))
	}
	_, err := transmitResponse(c.conn, res)
	// clear write deadline to avoid interference with future write operations
	if DefaultWriteTimeout > 0 {
		c.conn.(net.Conn).SetWriteDeadline(time.Time{})
	}
	if err != nil {
		c.setHealthy(false)
	}
	return err
}
+
+// Receive a response
+func (c *Client) Receive() (*gomemcached.MCResponse, error) {
+ resp, _, err := getResponse(c.conn, c.hdrBuf)
+ if err != nil && resp.Status != gomemcached.KEY_ENOENT && resp.Status != gomemcached.EBUSY {
+ c.setHealthy(false)
+ }
+ return resp, err
+}
+
+func (c *Client) ReceiveWithDeadline(deadline time.Time) (*gomemcached.MCResponse, error) {
+ c.conn.(net.Conn).SetReadDeadline(deadline)
+
+ resp, _, err := getResponse(c.conn, c.hdrBuf)
+
+ // Clear read deadline to avoid interference with future read operations.
+ c.conn.(net.Conn).SetReadDeadline(time.Time{})
+
+ if err != nil && resp.Status != gomemcached.KEY_ENOENT && resp.Status != gomemcached.EBUSY {
+ c.setHealthy(false)
+ }
+ return resp, err
+}
+
// appendMutationToken appends the big-endian uint16 encoding of the
// mutation-token HELLO feature code (0x04) to bytes.
func appendMutationToken(bytes []byte) []byte {
	var feature [2]byte
	binary.BigEndian.PutUint16(feature[:], uint16(0x04))
	return append(bytes, feature[:]...)
}
+
+//Send a hello command to enable MutationTokens
+func (c *Client) EnableMutationToken() (*gomemcached.MCResponse, error) {
+ var payload []byte
+ payload = appendMutationToken(payload)
+
+ return c.Send(&gomemcached.MCRequest{
+ Opcode: gomemcached.HELLO,
+ Key: []byte("GoMemcached"),
+ Body: payload,
+ })
+
+}
+
+//Send a hello command to enable specific features
+func (c *Client) EnableFeatures(features Features) (*gomemcached.MCResponse, error) {
+ var payload []byte
+
+ for _, feature := range features {
+ payload = append(payload, 0, 0)
+ binary.BigEndian.PutUint16(payload[len(payload)-2:], uint16(feature))
+ }
+
+ return c.Send(&gomemcached.MCRequest{
+ Opcode: gomemcached.HELLO,
+ Key: []byte("GoMemcached"),
+ Body: payload,
+ })
+
+}
+
// Get the value for a key.
func (c *Client) Get(vb uint16, key string) (*gomemcached.MCResponse, error) {
	return c.Send(&gomemcached.MCRequest{
		Opcode:  gomemcached.GET,
		VBucket: vb,
		Key:     []byte(key),
	})
}
+
+// Get the value for a key from a collection, identified by collection id.
+func (c *Client) GetFromCollection(vb uint16, cid uint32, key string) (*gomemcached.MCResponse, error) {
+ keyBytes := []byte(key)
+ encodedCid := make([]byte, binary.MaxVarintLen32)
+ lenEncodedCid := binary.PutUvarint(encodedCid, uint64(cid))
+ encodedKey := make([]byte, 0, lenEncodedCid+len(keyBytes))
+ encodedKey = append(encodedKey, encodedCid[0:lenEncodedCid]...)
+ encodedKey = append(encodedKey, keyBytes...)
+
+ return c.Send(&gomemcached.MCRequest{
+ Opcode: gomemcached.GET,
+ VBucket: vb,
+ Key: encodedKey,
+ })
+}
+
// Get the xattrs, doc value for the input key.
// Subdoc partial-result statuses (see IfResStatusError) are treated as
// success: the response is returned with a nil error so callers can
// inspect the per-path results.
func (c *Client) GetSubdoc(vb uint16, key string, subPaths []string) (*gomemcached.MCResponse, error) {

	extraBuf, valueBuf := GetSubDocVal(subPaths)
	res, err := c.Send(&gomemcached.MCRequest{
		Opcode:  gomemcached.SUBDOC_MULTI_LOOKUP,
		VBucket: vb,
		Key:     []byte(key),
		Extras:  extraBuf,
		Body:    valueBuf,
	})

	if err != nil && IfResStatusError(res) {
		return res, err
	}
	return res, nil
}
+
// Retrieve the collections manifest.
func (c *Client) GetCollectionsManifest() (*gomemcached.MCResponse, error) {

	res, err := c.Send(&gomemcached.MCRequest{
		Opcode: gomemcached.GET_COLLECTIONS_MANIFEST,
	})

	if err != nil && IfResStatusError(res) {
		return res, err
	}
	return res, nil
}
+
// CollectionsGetCID retrieves the collection id for the given
// "scope.collection" pair. (The original doc comment was a copy-paste
// of GetCollectionsManifest's.)
func (c *Client) CollectionsGetCID(scope string, collection string) (*gomemcached.MCResponse, error) {

	res, err := c.Send(&gomemcached.MCRequest{
		Opcode: gomemcached.COLLECTIONS_GET_CID,
		Key:    []byte(scope + "." + collection),
	})

	if err != nil && IfResStatusError(res) {
		return res, err
	}
	return res, nil
}
+
// Get the value for a key, and update expiry.
// The new expiration is carried in a 4-byte big-endian extras field.
func (c *Client) GetAndTouch(vb uint16, key string, exp int) (*gomemcached.MCResponse, error) {
	extraBuf := make([]byte, 4)
	binary.BigEndian.PutUint32(extraBuf[0:], uint32(exp))
	return c.Send(&gomemcached.MCRequest{
		Opcode:  gomemcached.GAT,
		VBucket: vb,
		Key:     []byte(key),
		Extras:  extraBuf,
	})
}
+
// Get metadata for a key.
func (c *Client) GetMeta(vb uint16, key string) (*gomemcached.MCResponse, error) {
	return c.Send(&gomemcached.MCRequest{
		Opcode:  gomemcached.GET_META,
		VBucket: vb,
		Key:     []byte(key),
	})
}
+
// Del deletes a key.
func (c *Client) Del(vb uint16, key string) (*gomemcached.MCResponse, error) {
	return c.Send(&gomemcached.MCRequest{
		Opcode:  gomemcached.DELETE,
		VBucket: vb,
		Key:     []byte(key)})
}
+
// Get a random document.
// 0xB6 is the GET_RANDOM_KEY opcode, not yet named in the gomemcached
// constants used here.
func (c *Client) GetRandomDoc() (*gomemcached.MCResponse, error) {
	return c.Send(&gomemcached.MCRequest{
		Opcode: 0xB6,
	})
}
+
// AuthList lists SASL auth mechanisms.
func (c *Client) AuthList() (*gomemcached.MCResponse, error) {
	return c.Send(&gomemcached.MCRequest{
		Opcode: gomemcached.SASL_LIST_MECHS})
}
+
+// Auth performs SASL PLAIN authentication against the server.
+func (c *Client) Auth(user, pass string) (*gomemcached.MCResponse, error) {
+ res, err := c.AuthList()
+
+ if err != nil {
+ return res, err
+ }
+
+ authMech := string(res.Body)
+ if strings.Index(authMech, "PLAIN") != -1 {
+ return c.AuthPlain(user, pass)
+ }
+ return nil, fmt.Errorf("auth mechanism PLAIN not supported")
+}
+
// AuthScramSha performs SCRAM-SHA authentication against the server.
//
// It picks the strongest SCRAM method the server advertises, then
// drives the two-step exchange: a start request (opcode 0x21, SASL
// auth) followed by a final request (opcode 0x22, SASL step).
func (c *Client) AuthScramSha(user, pass string) (*gomemcached.MCResponse, error) {
	res, err := c.AuthList()
	if err != nil {
		return nil, errors.Wrap(err, "Unable to obtain list of methods.")
	}

	methods := string(res.Body)
	method, err := scramsha.BestMethod(methods)
	if err != nil {
		return nil, errors.Wrap(err,
			"Unable to select SCRAM-SHA method.")
	}

	s, err := scramsha.NewScramSha(method)
	if err != nil {
		return nil, errors.Wrap(err, "Unable to initialize scramsha.")
	}

	// User names are wrapped in user-data tags so log redaction can strip them.
	logging.Infof("Using %v authentication for user %v%v%v", method, gomemcached.UdTagBegin, user, gomemcached.UdTagEnd)

	message, err := s.GetStartRequest(user)
	if err != nil {
		return nil, errors.Wrapf(err,
			"Error building start request for user %s.", user)
	}

	startRequest := &gomemcached.MCRequest{
		Opcode: 0x21,
		Key:    []byte(method),
		Body:   []byte(message)}

	startResponse, err := c.Send(startRequest)
	if err != nil {
		return nil, errors.Wrap(err, "Error sending start request.")
	}

	err = s.HandleStartResponse(string(startResponse.Body))
	if err != nil {
		return nil, errors.Wrap(err, "Error handling start response.")
	}

	message = s.GetFinalRequest(pass)

	// send step request
	finalRequest := &gomemcached.MCRequest{
		Opcode: 0x22,
		Key:    []byte(method),
		Body:   []byte(message)}
	finalResponse, err := c.Send(finalRequest)
	if err != nil {
		return nil, errors.Wrap(err, "Error sending final request.")
	}

	err = s.HandleFinalResponse(string(finalResponse.Body))
	if err != nil {
		return nil, errors.Wrap(err, "Error handling final response.")
	}

	return finalResponse, nil
}
+
// AuthPlain performs SASL PLAIN authentication: the body is
// "\x00<user>\x00<pass>" per the PLAIN mechanism.
func (c *Client) AuthPlain(user, pass string) (*gomemcached.MCResponse, error) {
	logging.Infof("Using plain authentication for user %v%v%v", gomemcached.UdTagBegin, user, gomemcached.UdTagEnd)
	return c.Send(&gomemcached.MCRequest{
		Opcode: gomemcached.SASL_AUTH,
		Key:    []byte("PLAIN"),
		Body:   []byte(fmt.Sprintf("\x00%s\x00%s", user, pass))})
}
+
// SelectBucket switches this connection to the named bucket.
func (c *Client) SelectBucket(bucket string) (*gomemcached.MCResponse, error) {
	return c.Send(&gomemcached.MCRequest{
		Opcode: gomemcached.SELECT_BUCKET,
		Key:    []byte(bucket)})
}
+
// store issues a mutation (SET/ADD/...) with flags and expiration
// packed into the 8-byte extras: flags in the high 32 bits, exp in the
// low 32 bits (big-endian).
func (c *Client) store(opcode gomemcached.CommandCode, vb uint16,
	key string, flags int, exp int, body []byte) (*gomemcached.MCResponse, error) {

	req := &gomemcached.MCRequest{
		Opcode:  opcode,
		VBucket: vb,
		Key:     []byte(key),
		Cas:     0,
		Opaque:  0,
		Extras:  []byte{0, 0, 0, 0, 0, 0, 0, 0},
		Body:    body}

	binary.BigEndian.PutUint64(req.Extras, uint64(flags)<<32|uint64(exp))
	return c.Send(req)
}
+
// storeCas is store with an explicit CAS value for optimistic
// concurrency; same extras packing as store.
func (c *Client) storeCas(opcode gomemcached.CommandCode, vb uint16,
	key string, flags int, exp int, cas uint64, body []byte) (*gomemcached.MCResponse, error) {

	req := &gomemcached.MCRequest{
		Opcode:  opcode,
		VBucket: vb,
		Key:     []byte(key),
		Cas:     cas,
		Opaque:  0,
		Extras:  []byte{0, 0, 0, 0, 0, 0, 0, 0},
		Body:    body}

	binary.BigEndian.PutUint64(req.Extras, uint64(flags)<<32|uint64(exp))
	return c.Send(req)
}
+
+// Incr increments the value at the given key.
+func (c *Client) Incr(vb uint16, key string,
+ amt, def uint64, exp int) (uint64, error) {
+
+ req := &gomemcached.MCRequest{
+ Opcode: gomemcached.INCREMENT,
+ VBucket: vb,
+ Key: []byte(key),
+ Extras: make([]byte, 8+8+4),
+ }
+ binary.BigEndian.PutUint64(req.Extras[:8], amt)
+ binary.BigEndian.PutUint64(req.Extras[8:16], def)
+ binary.BigEndian.PutUint32(req.Extras[16:20], uint32(exp))
+
+ resp, err := c.Send(req)
+ if err != nil {
+ return 0, err
+ }
+
+ return binary.BigEndian.Uint64(resp.Body), nil
+}
+
+// Decr decrements the value at the given key.
+func (c *Client) Decr(vb uint16, key string,
+ amt, def uint64, exp int) (uint64, error) {
+
+ req := &gomemcached.MCRequest{
+ Opcode: gomemcached.DECREMENT,
+ VBucket: vb,
+ Key: []byte(key),
+ Extras: make([]byte, 8+8+4),
+ }
+ binary.BigEndian.PutUint64(req.Extras[:8], amt)
+ binary.BigEndian.PutUint64(req.Extras[8:16], def)
+ binary.BigEndian.PutUint32(req.Extras[16:20], uint32(exp))
+
+ resp, err := c.Send(req)
+ if err != nil {
+ return 0, err
+ }
+
+ return binary.BigEndian.Uint64(resp.Body), nil
+}
+
// Add a value for a key (store if not exists).
func (c *Client) Add(vb uint16, key string, flags int, exp int,
	body []byte) (*gomemcached.MCResponse, error) {
	return c.store(gomemcached.ADD, vb, key, flags, exp, body)
}
+
// Set the value for a key (unconditional store).
func (c *Client) Set(vb uint16, key string, flags int, exp int,
	body []byte) (*gomemcached.MCResponse, error) {
	return c.store(gomemcached.SET, vb, key, flags, exp, body)
}
+
// SetCas set the value for a key with cas (fails on CAS mismatch).
func (c *Client) SetCas(vb uint16, key string, flags int, exp int, cas uint64,
	body []byte) (*gomemcached.MCResponse, error) {
	return c.storeCas(gomemcached.SET, vb, key, flags, exp, cas, body)
}
+
+// Append data to the value of a key.
+func (c *Client) Append(vb uint16, key string, data []byte) (*gomemcached.MCResponse, error) {
+ req := &gomemcached.MCRequest{
+ Opcode: gomemcached.APPEND,
+ VBucket: vb,
+ Key: []byte(key),
+ Cas: 0,
+ Opaque: 0,
+ Body: data}
+
+ return c.Send(req)
+}
+
// GetBulk gets keys in bulk.
//
// One GET (or SUBDOC_MULTI_LOOKUP when subPaths is non-empty) is
// pipelined per key, followed by a NOOP that marks end-of-stream. A
// reader goroutine consumes responses concurrently and matches them
// back to keys via the opaque field; results land in rv keyed by the
// request key. Keys the server reports as KEY_ENOENT are simply absent
// from rv.
func (c *Client) GetBulk(vb uint16, keys []string, rv map[string]*gomemcached.MCResponse, subPaths []string) error {
	stopch := make(chan bool)
	var wg sync.WaitGroup

	defer func() {
		close(stopch)
		wg.Wait()
	}()

	// Reset the opaque counter before it can wrap; this batch uses
	// opaques opStart..opStart+len(keys).
	if (math.MaxInt32 - c.opaque) < (uint32(len(keys)) + 1) {
		c.opaque = uint32(1)
	}

	opStart := c.opaque

	errch := make(chan error, 2)

	wg.Add(1)
	go func() {
		defer func() {
			if r := recover(); r != nil {
				logging.Infof("Recovered in f %v", r)
			}
			errch <- nil
			wg.Done()
		}()

		ok := true
		for ok {

			select {
			case <-stopch:
				return
			default:
				res, err := c.Receive()

				if err != nil && IfResStatusError(res) {
					if res == nil || res.Status != gomemcached.KEY_ENOENT {
						errch <- err
						return
					}
					// continue receiving in case of KEY_ENOENT
				} else if res.Opcode == gomemcached.GET ||
					res.Opcode == gomemcached.SUBDOC_GET ||
					res.Opcode == gomemcached.SUBDOC_MULTI_LOOKUP {
					// NOTE(review): opaque is uint32, so "opaque < 0" below can
					// never be true; range protection relies on unsigned wrap
					// making opaque >= len(keys).
					opaque := res.Opaque - opStart
					if opaque < 0 || opaque >= uint32(len(keys)) {
						// Every now and then we seem to be seeing an invalid opaque
						// value returned from the server. When this happens log the error
						// and the calling function will retry the bulkGet. MB-15140
						logging.Errorf(" Invalid opaque Value. Debug info : Res.opaque : %v(%v), Keys %v, Response received %v \n key list %v this key %v", res.Opaque, opaque, len(keys), res, keys, string(res.Body))
						errch <- fmt.Errorf("Out of Bounds error")
						return
					}

					rv[keys[opaque]] = res
				}

				if res.Opcode == gomemcached.NOOP {
					ok = false
				}
			}
		}
	}()

	memcachedReqPkt := &gomemcached.MCRequest{
		Opcode:  gomemcached.GET,
		VBucket: vb,
	}

	if len(subPaths) > 0 {
		extraBuf, valueBuf := GetSubDocVal(subPaths)
		memcachedReqPkt.Opcode = gomemcached.SUBDOC_MULTI_LOOKUP
		memcachedReqPkt.Extras = extraBuf
		memcachedReqPkt.Body = valueBuf
	}

	for _, k := range keys { // Start of Get request
		memcachedReqPkt.Key = []byte(k)
		memcachedReqPkt.Opaque = c.opaque

		err := c.Transmit(memcachedReqPkt)
		if err != nil {
			logging.Errorf(" Transmit failed in GetBulkAll %v", err)
			return err
		}
		c.opaque++
	} // End of Get request

	// finally transmit a NOOP
	err := c.Transmit(&gomemcached.MCRequest{
		Opcode:  gomemcached.NOOP,
		VBucket: vb,
		Opaque:  c.opaque,
	})

	if err != nil {
		logging.Errorf(" Transmit of NOOP failed %v", err)
		return err
	}
	c.opaque++

	return <-errch
}
+
// GetSubDocVal builds the extras and body for a SUBDOC_MULTI_LOOKUP
// over the given xattr paths. Each path becomes a 4-byte spec header
// (opcode, flags, big-endian path length) followed by the path bytes.
func GetSubDocVal(subPaths []string) (extraBuf, valueBuf []byte) {

	var ops []string
	totalBytesLen := 0
	// num starts at 1, so valueBuf is sized with one spare 4-byte slot
	// beyond the path specs actually written below.
	num := 1

	for _, v := range subPaths {
		totalBytesLen = totalBytesLen + len([]byte(v))
		ops = append(ops, v)
		num = num + 1
	}

	// Xattr retrieval - subdoc multi get
	// 0x04 here is the doc flag requesting access to deleted documents.
	extraBuf = append(extraBuf, uint8(0x04))

	valueBuf = make([]byte, num*4+totalBytesLen)

	//opcode for subdoc get
	op := gomemcached.SUBDOC_GET

	// Calculate path total bytes
	// There are 2 ops - get xattrs - both input and $document and get whole doc
	valIter := 0

	for _, v := range ops {
		pathBytes := []byte(v)
		valueBuf[valIter+0] = uint8(op)

		// SubdocFlagXattrPath indicates that the path refers to
		// an Xattr rather than the document body.
		valueBuf[valIter+1] = uint8(gomemcached.SUBDOC_FLAG_XATTR)

		// 2 byte key
		binary.BigEndian.PutUint16(valueBuf[valIter+2:], uint16(len(pathBytes)))

		// Then n bytes path
		copy(valueBuf[valIter+4:], pathBytes)
		valIter = valIter + 4 + len(pathBytes)
	}

	return
}
+
// ObservedStatus is the type reported by the Observe method
type ObservedStatus uint8

// Observation status values.
const (
	ObservedNotPersisted     = ObservedStatus(0x00) // found, not persisted
	ObservedPersisted        = ObservedStatus(0x01) // found, persisted
	ObservedNotFound         = ObservedStatus(0x80) // not found (or a persisted delete)
	ObservedLogicallyDeleted = ObservedStatus(0x81) // pending deletion (not persisted yet)
)

// ObserveResult represents the data obtained by an Observe call
type ObserveResult struct {
	Status          ObservedStatus // Whether the value has been persisted/deleted
	Cas             uint64         // Current value's CAS
	PersistenceTime time.Duration  // Node's average time to persist a value
	ReplicationTime time.Duration  // Node's average time to replicate a value
}
+
// Observe gets the persistence/replication/CAS state of a key.
//
// The request body is <vbucket:2><keylen:2><key>; the response body
// echoes that prefix followed by <status:1><cas:8>, and the response
// header's Cas field is reused by the server to carry timing stats.
func (c *Client) Observe(vb uint16, key string) (result ObserveResult, err error) {
	// http://www.couchbase.com/wiki/display/couchbase/Observe
	body := make([]byte, 4+len(key))
	binary.BigEndian.PutUint16(body[0:2], vb)
	binary.BigEndian.PutUint16(body[2:4], uint16(len(key)))
	copy(body[4:4+len(key)], key)

	res, err := c.Send(&gomemcached.MCRequest{
		Opcode:  gomemcached.OBSERVE,
		VBucket: vb,
		Body:    body,
	})
	if err != nil {
		return
	}

	// Parse the response data from the body:
	if len(res.Body) < 2+2+1 {
		err = io.ErrUnexpectedEOF
		return
	}
	outVb := binary.BigEndian.Uint16(res.Body[0:2])
	keyLen := binary.BigEndian.Uint16(res.Body[2:4])
	if len(res.Body) < 2+2+int(keyLen)+1+8 {
		err = io.ErrUnexpectedEOF
		return
	}
	outKey := string(res.Body[4 : 4+keyLen])
	if outVb != vb || outKey != key {
		err = fmt.Errorf("observe returned wrong vbucket/key: %d/%q", outVb, outKey)
		return
	}
	result.Status = ObservedStatus(res.Body[4+keyLen])
	result.Cas = binary.BigEndian.Uint64(res.Body[5+keyLen:])
	// The response reuses the Cas field to store time statistics:
	result.PersistenceTime = time.Duration(res.Cas>>32) * time.Millisecond
	result.ReplicationTime = time.Duration(res.Cas&math.MaxUint32) * time.Millisecond
	return
}
+
+// CheckPersistence checks whether a stored value has been persisted to disk yet.
+func (result ObserveResult) CheckPersistence(cas uint64, deletion bool) (persisted bool, overwritten bool) {
+ switch {
+ case result.Status == ObservedNotFound && deletion:
+ persisted = true
+ case result.Cas != cas:
+ overwritten = true
+ case result.Status == ObservedPersisted:
+ persisted = true
+ }
+ return
+}
+
// ObserveSeqResult is the sequence-number-based observe result
// (OBSERVE_SEQNO); the Old* fields are only populated after a failover.
type ObserveSeqResult struct {
	Failover           uint8  // Set to 1 if a failover took place
	VbId               uint16 // vbucket id
	Vbuuid             uint64 // vucket uuid
	LastPersistedSeqNo uint64 // last persisted sequence number
	CurrentSeqNo       uint64 // current sequence number
	OldVbuuid          uint64 // Old bucket vbuuid
	LastSeqNo          uint64 // last sequence number received before failover
}
+
// ObserveSeq issues OBSERVE_SEQNO for the given vbucket/vbuuid and
// decodes the fixed-offset response body:
// <failover:1><vbid:2><vbuuid:8><lastPersisted:8><current:8>, plus
// <oldVbuuid:8><lastSeq:8> when a failover occurred.
func (c *Client) ObserveSeq(vb uint16, vbuuid uint64) (result *ObserveSeqResult, err error) {
	// http://www.couchbase.com/wiki/display/couchbase/Observe
	body := make([]byte, 8)
	binary.BigEndian.PutUint64(body[0:8], vbuuid)

	res, err := c.Send(&gomemcached.MCRequest{
		Opcode:  gomemcached.OBSERVE_SEQNO,
		VBucket: vb,
		Body:    body,
		Opaque:  0x01,
	})
	if err != nil {
		return
	}

	if res.Status != gomemcached.SUCCESS {
		return nil, fmt.Errorf(" Observe returned error %v", res.Status)
	}

	// Parse the response data from the body:
	if len(res.Body) < (1 + 2 + 8 + 8 + 8) {
		err = io.ErrUnexpectedEOF
		return
	}

	result = &ObserveSeqResult{}
	result.Failover = res.Body[0]
	result.VbId = binary.BigEndian.Uint16(res.Body[1:3])
	result.Vbuuid = binary.BigEndian.Uint64(res.Body[3:11])
	result.LastPersistedSeqNo = binary.BigEndian.Uint64(res.Body[11:19])
	result.CurrentSeqNo = binary.BigEndian.Uint64(res.Body[19:27])

	// in case of failover processing we can have old vbuuid and the last persisted seq number
	if result.Failover == 1 && len(res.Body) >= (1+2+8+8+8+8+8) {
		result.OldVbuuid = binary.BigEndian.Uint64(res.Body[27:35])
		result.LastSeqNo = binary.BigEndian.Uint64(res.Body[35:43])
	}

	return
}
+
// CasOp is the type of operation to perform on this CAS loop.
type CasOp uint8

const (
	// CASStore instructs the server to store the new value normally
	CASStore = CasOp(iota)
	// CASQuit instructs the client to stop attempting to CAS, leaving value untouched
	CASQuit
	// CASDelete instructs the server to delete the current value
	CASDelete
)

// Error renders the operation as text; user-specified termination is
// surfaced to callers as an error value.
func (c CasOp) Error() string {
	names := [...]string{
		CASStore:  "CAS store",
		CASQuit:   "CAS quit",
		CASDelete: "CAS delete",
	}
	if int(c) < len(names) {
		return names[c]
	}
	panic("Unhandled value")
}
+
//////// CAS TRANSFORM

// CASState tracks the state of CAS over several operations.
//
// This is used directly by CASNext and indirectly by CAS
type CASState struct {
	initialized bool   // false on the first call to CASNext, then true
	Value       []byte // Current value of key; update in place to new value
	Cas         uint64 // Current CAS value of key
	Exists      bool   // Does a value exist for the key? (If not, Value will be nil)
	Err         error  // Error, if any, after CASNext returns false
	resp        *gomemcached.MCResponse
}
+
// CASNext is a non-callback, loop-based version of CAS method.
//
// Usage is like this:
//
// var state memcached.CASState
// for client.CASNext(vb, key, exp, &state) {
//    state.Value = some_mutation(state.Value)
// }
// if state.Err != nil { ... }
//
// Returns true while the caller should mutate state.Value and retry;
// false once the operation has succeeded or failed fatally.
func (c *Client) CASNext(vb uint16, k string, exp int, state *CASState) bool {
	if state.initialized {
		if !state.Exists {
			// Adding a new key:
			if state.Value == nil {
				state.Cas = 0
				return false // no-op (delete of non-existent value)
			}
			state.resp, state.Err = c.Add(vb, k, 0, exp, state.Value)
		} else {
			// Updating / deleting a key:
			req := &gomemcached.MCRequest{
				Opcode:  gomemcached.DELETE,
				VBucket: vb,
				Key:     []byte(k),
				Cas:     state.Cas}
			if state.Value != nil {
				// Non-nil Value means SET rather than DELETE; pack
				// flags/exp into extras like store does.
				req.Opcode = gomemcached.SET
				req.Opaque = 0
				req.Extras = []byte{0, 0, 0, 0, 0, 0, 0, 0}
				req.Body = state.Value

				flags := 0
				binary.BigEndian.PutUint64(req.Extras, uint64(flags)<<32|uint64(exp))
			}
			state.resp, state.Err = c.Send(req)
		}

		// If the response status is KEY_EEXISTS or NOT_STORED there's a conflict and we'll need to
		// get the new value (below). Otherwise, we're done (either success or failure) so return:
		if !(state.resp != nil && (state.resp.Status == gomemcached.KEY_EEXISTS ||
			state.resp.Status == gomemcached.NOT_STORED)) {
			state.Cas = state.resp.Cas
			return false // either success or fatal error
		}
	}

	// Initial call, or after a conflict: GET the current value and CAS and return them:
	state.initialized = true
	if state.resp, state.Err = c.Get(vb, k); state.Err == nil {
		state.Exists = true
		state.Value = state.resp.Body
		state.Cas = state.resp.Cas
	} else if state.resp != nil && state.resp.Status == gomemcached.KEY_ENOENT {
		state.Err = nil
		state.Exists = false
		state.Value = nil
		state.Cas = 0
	} else {
		return false // fatal error
	}
	return true // keep going...
}
+
// CasFunc is the type of function used to perform a CAS transform.
//
// Input is the current value, or nil if no value exists.
// The function should return the new value (if any) to set, and the store/quit/delete operation.
type CasFunc func(current []byte) ([]byte, CasOp)
+
// CAS performs a CAS transform with the given function.
//
// If the value does not exist, a nil current value will be sent to f.
// Note that CASQuit/CASDelete terminate the loop by being returned as
// the error value (CasOp implements error).
func (c *Client) CAS(vb uint16, k string, f CasFunc,
	initexp int) (*gomemcached.MCResponse, error) {
	var state CASState
	for c.CASNext(vb, k, initexp, &state) {
		newValue, operation := f(state.Value)
		if operation == CASQuit || (operation == CASDelete && state.Value == nil) {
			return nil, operation
		}
		state.Value = newValue
	}
	return state.resp, state.Err
}
+
// StatValue is one of the stats returned from the Stats method.
type StatValue struct {
	// The stat key
	Key string
	// The stat value
	Val string
}
+
+// Stats requests server-side stats.
+//
+// Use "" as the stat key for toplevel stats.
+func (c *Client) Stats(key string) ([]StatValue, error) {
+ rv := make([]StatValue, 0, 128)
+
+ req := &gomemcached.MCRequest{
+ Opcode: gomemcached.STAT,
+ Key: []byte(key),
+ Opaque: 918494,
+ }
+
+ err := c.Transmit(req)
+ if err != nil {
+ return rv, err
+ }
+
+ for {
+ res, _, err := getResponse(c.conn, c.hdrBuf)
+ if err != nil {
+ return rv, err
+ }
+ k := string(res.Key)
+ if k == "" {
+ break
+ }
+ rv = append(rv, StatValue{
+ Key: k,
+ Val: string(res.Body),
+ })
+ }
+ return rv, nil
+}
+
+// StatsMap requests server-side stats similarly to Stats, but returns
+// them as a map.
+//
+// Use "" as the stat key for toplevel stats.
+func (c *Client) StatsMap(key string) (map[string]string, error) {
+ rv := make(map[string]string)
+
+ req := &gomemcached.MCRequest{
+ Opcode: gomemcached.STAT,
+ Key: []byte(key),
+ Opaque: 918494,
+ }
+
+ err := c.Transmit(req)
+ if err != nil {
+ return rv, err
+ }
+
+ for {
+ res, _, err := getResponse(c.conn, c.hdrBuf)
+ if err != nil {
+ return rv, err
+ }
+ k := string(res.Key)
+ if k == "" {
+ break
+ }
+ rv[k] = string(res.Body)
+ }
+
+ return rv, nil
+}
+
+// instead of returning a new statsMap, simply populate passed in statsMap, which contains all the keys
+// for which stats needs to be retrieved
+func (c *Client) StatsMapForSpecifiedStats(key string, statsMap map[string]string) error {
+
+ // clear statsMap
+ for key, _ := range statsMap {
+ statsMap[key] = ""
+ }
+
+ req := &gomemcached.MCRequest{
+ Opcode: gomemcached.STAT,
+ Key: []byte(key),
+ Opaque: 918494,
+ }
+
+ err := c.Transmit(req)
+ if err != nil {
+ return err
+ }
+
+ for {
+ res, _, err := getResponse(c.conn, c.hdrBuf)
+ if err != nil {
+ return err
+ }
+ k := string(res.Key)
+ if k == "" {
+ break
+ }
+ if _, ok := statsMap[k]; ok {
+ statsMap[k] = string(res.Body)
+ }
+ }
+
+ return nil
+}
+
// Hijack exposes the underlying connection from this client.
//
// It also marks the connection as unhealthy since the client will
// have lost control over the connection and can't otherwise verify
// things are in good shape for connection pools.
func (c *Client) Hijack() io.ReadWriteCloser {
	c.setHealthy(false)
	return c.conn
}
+
+func (c *Client) setHealthy(healthy bool) {
+ healthyState := UnHealthy
+ if healthy {
+ healthyState = Healthy
+ }
+ atomic.StoreUint32(&c.healthy, healthyState)
+}
+
+func IfResStatusError(response *gomemcached.MCResponse) bool {
+ return response == nil ||
+ (response.Status != gomemcached.SUBDOC_BAD_MULTI &&
+ response.Status != gomemcached.SUBDOC_PATH_NOT_FOUND &&
+ response.Status != gomemcached.SUBDOC_MULTI_PATH_FAILURE_DELETED)
+}
diff --git a/vendor/github.com/couchbase/gomemcached/client/tap_feed.go b/vendor/github.com/couchbase/gomemcached/client/tap_feed.go
new file mode 100644
index 0000000..fd628c5
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/client/tap_feed.go
@@ -0,0 +1,333 @@
+package memcached
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+
+ "github.com/couchbase/gomemcached"
+ "github.com/couchbase/goutils/logging"
+)
+
+// TAP protocol docs: http://www.couchbase.com/wiki/display/couchbase/TAP+Protocol
+
+// TapOpcode is the tap operation type (found in TapEvent)
+type TapOpcode uint8
+
+// Tap opcode values.
+const (
+ TapBeginBackfill = TapOpcode(iota)
+ TapEndBackfill
+ TapMutation
+ TapDeletion
+ TapCheckpointStart
+ TapCheckpointEnd
+ tapEndStream
+)
+
+const tapMutationExtraLen = 16
+
+var tapOpcodeNames map[TapOpcode]string
+
+func init() {
+ tapOpcodeNames = map[TapOpcode]string{
+ TapBeginBackfill: "BeginBackfill",
+ TapEndBackfill: "EndBackfill",
+ TapMutation: "Mutation",
+ TapDeletion: "Deletion",
+ TapCheckpointStart: "TapCheckpointStart",
+ TapCheckpointEnd: "TapCheckpointEnd",
+ tapEndStream: "EndStream",
+ }
+}
+
+func (opcode TapOpcode) String() string {
+ name := tapOpcodeNames[opcode]
+ if name == "" {
+ name = fmt.Sprintf("#%d", opcode)
+ }
+ return name
+}
+
+// TapEvent is a TAP notification of an operation on the server.
+type TapEvent struct {
+ Opcode TapOpcode // Type of event
+ VBucket uint16 // VBucket this event applies to
+ Flags uint32 // Item flags
+ Expiry uint32 // Item expiration time
+ Key, Value []byte // Item key/value
+ Cas uint64
+}
+
+// makeTapEvent translates one raw TAP request from the server into a
+// TapEvent. It returns nil for packets that should not be surfaced to the
+// client: NOOPs, protocol-negotiation TAP_OPAQUE messages, and unknown
+// opcodes.
+func makeTapEvent(req gomemcached.MCRequest) *TapEvent {
+	event := TapEvent{
+		VBucket: req.VBucket,
+	}
+	switch req.Opcode {
+	case gomemcached.TAP_MUTATION:
+		event.Opcode = TapMutation
+		event.Key = req.Key
+		event.Value = req.Body
+		event.Cas = req.Cas
+	case gomemcached.TAP_DELETE:
+		event.Opcode = TapDeletion
+		event.Key = req.Key
+		event.Cas = req.Cas
+	case gomemcached.TAP_CHECKPOINT_START:
+		event.Opcode = TapCheckpointStart
+	case gomemcached.TAP_CHECKPOINT_END:
+		event.Opcode = TapCheckpointEnd
+	case gomemcached.TAP_OPAQUE:
+		// The opaque sub-opcode lives in Extras[8:12]; packets too short
+		// to carry it are dropped.
+		if len(req.Extras) < 8+4 {
+			return nil
+		}
+		switch op := int(binary.BigEndian.Uint32(req.Extras[8:])); op {
+		case gomemcached.TAP_OPAQUE_INITIAL_VBUCKET_STREAM:
+			event.Opcode = TapBeginBackfill
+		case gomemcached.TAP_OPAQUE_CLOSE_BACKFILL:
+			event.Opcode = TapEndBackfill
+		case gomemcached.TAP_OPAQUE_CLOSE_TAP_STREAM:
+			event.Opcode = tapEndStream
+		case gomemcached.TAP_OPAQUE_ENABLE_AUTO_NACK:
+			return nil
+		case gomemcached.TAP_OPAQUE_ENABLE_CHECKPOINT_SYNC:
+			return nil
+		default:
+			logging.Infof("TapFeed: Ignoring TAP_OPAQUE/%d", op)
+			return nil // unknown opaque event
+		}
+	case gomemcached.NOOP:
+		return nil // ignore
+	default:
+		logging.Infof("TapFeed: Ignoring %s", req.Opcode)
+		return nil // unknown event
+	}
+
+	// Mutations and deletions additionally carry item flags and expiry
+	// in Extras[8:16].
+	if len(req.Extras) >= tapMutationExtraLen &&
+		(event.Opcode == TapMutation || event.Opcode == TapDeletion) {
+
+		event.Flags = binary.BigEndian.Uint32(req.Extras[8:])
+		event.Expiry = binary.BigEndian.Uint32(req.Extras[12:])
+	}
+
+	return &event
+}
+
+// String returns a human-readable description of the event. Control events
+// show only the opcode and vbucket; data events also include the key, value
+// size, flags and expiry. (The original format strings were empty, which is
+// a go vet error — Sprintf arguments with no directives — and always
+// rendered ""; the angle-bracketed text had been stripped.)
+func (event TapEvent) String() string {
+	switch event.Opcode {
+	case TapBeginBackfill, TapEndBackfill, TapCheckpointStart, TapCheckpointEnd:
+		return fmt.Sprintf("<TapEvent %s, vbucket=%d>",
+			event.Opcode, event.VBucket)
+	default:
+		return fmt.Sprintf("<TapEvent %s, key=%q (%d bytes) flags=%x, exp=%d>",
+			event.Opcode, event.Key, len(event.Value),
+			event.Flags, event.Expiry)
+	}
+}
+
+// TapArguments are parameters for requesting a TAP feed.
+//
+// Call DefaultTapArguments to get a default one.
+type TapArguments struct {
+ // Timestamp of oldest item to send.
+ //
+ // Use TapNoBackfill to suppress all past items.
+ Backfill uint64
+ // If set, server will disconnect after sending existing items.
+ Dump bool
+ // The indices of the vbuckets to watch; empty/nil to watch all.
+ VBuckets []uint16
+ // Transfers ownership of vbuckets during cluster rebalance.
+ Takeover bool
+ // If true, server will wait for client ACK after every notification.
+ SupportAck bool
+ // If true, client doesn't want values so server shouldn't send them.
+ KeysOnly bool
+ // If true, client wants the server to send checkpoint events.
+ Checkpoint bool
+ // Optional identifier to use for this client, to allow reconnects
+ ClientName string
+ // Registers this client (by name) till explicitly deregistered.
+ RegisteredClient bool
+}
+
+// Value for TapArguments.Backfill denoting that no past events at all
+// should be sent.
+const TapNoBackfill = math.MaxUint64
+
+// DefaultTapArguments returns a default set of parameter values to
+// pass to StartTapFeed.
+func DefaultTapArguments() TapArguments {
+ return TapArguments{
+ Backfill: TapNoBackfill,
+ }
+}
+
+// flags encodes the argument set as TAP_CONNECT flag bits, returned as a
+// 4-byte big-endian value suitable for the request's Extras field.
+func (args *TapArguments) flags() []byte {
+	var flags gomemcached.TapConnectFlag
+	if args.Backfill != 0 {
+		flags |= gomemcached.BACKFILL
+	}
+	if args.Dump {
+		flags |= gomemcached.DUMP
+	}
+	if len(args.VBuckets) > 0 {
+		flags |= gomemcached.LIST_VBUCKETS
+	}
+	if args.Takeover {
+		flags |= gomemcached.TAKEOVER_VBUCKETS
+	}
+	if args.SupportAck {
+		flags |= gomemcached.SUPPORT_ACK
+	}
+	if args.KeysOnly {
+		flags |= gomemcached.REQUEST_KEYS_ONLY
+	}
+	if args.Checkpoint {
+		flags |= gomemcached.CHECKPOINT
+	}
+	if args.RegisteredClient {
+		flags |= gomemcached.REGISTERED_CLIENT
+	}
+	encoded := make([]byte, 4)
+	binary.BigEndian.PutUint32(encoded, uint32(flags))
+	return encoded
+}
+
+func must(err error) {
+ if err != nil {
+ panic(err)
+ }
+}
+
+// bytes builds the TAP_CONNECT request body: the optional 64-bit backfill
+// timestamp, then (when specific vbuckets were requested) a 16-bit count
+// followed by each vbucket id — all big-endian.
+func (args *TapArguments) bytes() (rv []byte) {
+	buf := bytes.NewBuffer([]byte{})
+
+	if args.Backfill > 0 {
+		must(binary.Write(buf, binary.BigEndian, uint64(args.Backfill)))
+	}
+
+	if len(args.VBuckets) > 0 {
+		must(binary.Write(buf, binary.BigEndian, uint16(len(args.VBuckets))))
+		for i := 0; i < len(args.VBuckets); i++ {
+			must(binary.Write(buf, binary.BigEndian, uint16(args.VBuckets[i])))
+		}
+	}
+	return buf.Bytes()
+}
+
+// TapFeed represents a stream of events from a server.
+type TapFeed struct {
+ C <-chan TapEvent
+ Error error
+ closer chan bool
+}
+
+// StartTapFeed starts a TAP feed on a client connection.
+//
+// The events can be read from the returned channel. The connection
+// can no longer be used for other purposes; it's now reserved for
+// receiving the TAP messages. To stop receiving events, close the
+// client connection.
+func (mc *Client) StartTapFeed(args TapArguments) (*TapFeed, error) {
+ rq := &gomemcached.MCRequest{
+ Opcode: gomemcached.TAP_CONNECT,
+ Key: []byte(args.ClientName),
+ Extras: args.flags(),
+ Body: args.bytes()}
+
+ err := mc.Transmit(rq)
+ if err != nil {
+ return nil, err
+ }
+
+ ch := make(chan TapEvent)
+ feed := &TapFeed{
+ C: ch,
+ closer: make(chan bool),
+ }
+ go mc.runFeed(ch, feed)
+ return feed, nil
+}
+
+// TapRecvHook is called after every incoming tap packet is received.
+var TapRecvHook func(*gomemcached.MCRequest, int, error)
+
+// runFeed is the internal goroutine that reads TAP requests off the socket
+// and forwards decoded events to ch until the stream ends, the feed is
+// closed, or an error occurs. On exit the client connection is closed and
+// ch is closed so readers observe termination.
+func (mc *Client) runFeed(ch chan TapEvent, feed *TapFeed) {
+	defer close(ch)
+	var headerBuf [gomemcached.HDR_LEN]byte
+loop:
+	for {
+		// Read the next request from the server.
+		//
+		// (Can't call mc.Receive() because it reads a
+		// _response_ not a request.)
+		var pkt gomemcached.MCRequest
+		n, err := pkt.Receive(mc.conn, headerBuf[:])
+		if TapRecvHook != nil {
+			TapRecvHook(&pkt, n, err)
+		}
+
+		if err != nil {
+			// EOF is the normal end of stream; anything else is reported.
+			if err != io.EOF {
+				feed.Error = err
+			}
+			break loop
+		}
+
+		//logging.Infof("** TapFeed received %#v : %q", pkt, pkt.Body)
+
+		if pkt.Opcode == gomemcached.TAP_CONNECT {
+			// This is not an event from the server; it's
+			// an error response to my connect request.
+			feed.Error = fmt.Errorf("tap connection failed: %s", pkt.Body)
+			break loop
+		}
+
+		event := makeTapEvent(pkt)
+		if event != nil {
+			if event.Opcode == tapEndStream {
+				break loop
+			}
+
+			// Deliver the event, bailing out if the feed was closed.
+			select {
+			case ch <- *event:
+			case <-feed.closer:
+				break loop
+			}
+		}
+
+		// If the server requested an ACK (TAP_ACK flag in Extras), send one.
+		if len(pkt.Extras) >= 4 {
+			reqFlags := binary.BigEndian.Uint16(pkt.Extras[2:])
+			if reqFlags&gomemcached.TAP_ACK != 0 {
+				if _, err := mc.sendAck(&pkt); err != nil {
+					feed.Error = err
+					break loop
+				}
+			}
+		}
+	}
+	if err := mc.Close(); err != nil {
+		logging.Errorf("Error closing memcached client: %v", err)
+	}
+}
+
+func (mc *Client) sendAck(pkt *gomemcached.MCRequest) (int, error) {
+ res := gomemcached.MCResponse{
+ Opcode: pkt.Opcode,
+ Opaque: pkt.Opaque,
+ Status: gomemcached.SUCCESS,
+ }
+ return res.Transmit(mc.conn)
+}
+
+// Close terminates a TapFeed.
+//
+// Call this if you stop using a TapFeed before its channel ends.
+func (feed *TapFeed) Close() {
+ close(feed.closer)
+}
diff --git a/vendor/github.com/couchbase/gomemcached/client/transport.go b/vendor/github.com/couchbase/gomemcached/client/transport.go
new file mode 100644
index 0000000..f4cea17
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/client/transport.go
@@ -0,0 +1,67 @@
+package memcached
+
+import (
+ "errors"
+ "io"
+
+ "github.com/couchbase/gomemcached"
+)
+
+var errNoConn = errors.New("no connection")
+
+// UnwrapMemcachedError converts memcached errors to normal responses.
+//
+// If the error is a memcached response, declare the error to be nil
+// so a client can handle the status without worrying about whether it
+// indicates success or failure.
+//
+// The identity comparison below works because getResponse returns the
+// very same *MCResponse value as the error for non-SUCCESS statuses.
+func UnwrapMemcachedError(rv *gomemcached.MCResponse,
+	err error) (*gomemcached.MCResponse, error) {
+
+	if rv == err {
+		return rv, nil
+	}
+	return rv, err
+}
+
+// ReceiveHook is called after every packet is received (or attempted to be)
+var ReceiveHook func(*gomemcached.MCResponse, int, error)
+
+// getResponse reads one memcached response from s into a fresh MCResponse,
+// using hdrBytes as scratch space for the fixed-size header. Any status
+// other than SUCCESS or AUTH_CONTINUE is returned as the error value
+// (MCResponse implements error), which is what UnwrapMemcachedError
+// relies on.
+func getResponse(s io.Reader, hdrBytes []byte) (rv *gomemcached.MCResponse, n int, err error) {
+	if s == nil {
+		return nil, 0, errNoConn
+	}
+
+	rv = &gomemcached.MCResponse{}
+	n, err = rv.Receive(s, hdrBytes)
+
+	if ReceiveHook != nil {
+		ReceiveHook(rv, n, err)
+	}
+
+	if err == nil && (rv.Status != gomemcached.SUCCESS && rv.Status != gomemcached.AUTH_CONTINUE) {
+		err = rv
+	}
+	return rv, n, err
+}
+
+// TransmitHook is called after each packet is transmitted.
+var TransmitHook func(*gomemcached.MCRequest, int, error)
+
+func transmitRequest(o io.Writer, req *gomemcached.MCRequest) (int, error) {
+ if o == nil {
+ return 0, errNoConn
+ }
+ n, err := req.Transmit(o)
+ if TransmitHook != nil {
+ TransmitHook(req, n, err)
+ }
+ return n, err
+}
+
+// transmitResponse writes res to o, guarding against a nil writer.
+// Unlike transmitRequest there is no hook to invoke, so the Transmit
+// result can be returned directly.
+func transmitResponse(o io.Writer, res *gomemcached.MCResponse) (int, error) {
+	if o == nil {
+		return 0, errNoConn
+	}
+	return res.Transmit(o)
+}
diff --git a/vendor/github.com/couchbase/gomemcached/client/upr_feed.go b/vendor/github.com/couchbase/gomemcached/client/upr_feed.go
new file mode 100644
index 0000000..95fa125
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/client/upr_feed.go
@@ -0,0 +1,1107 @@
+// go implementation of upr client.
+// See https://github.com/couchbaselabs/cbupr/blob/master/transport-spec.md
+// TODO
+// 1. Use a pool allocator to avoid garbage
+package memcached
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "github.com/couchbase/gomemcached"
+ "github.com/couchbase/goutils/logging"
+ "strconv"
+ "sync"
+ "sync/atomic"
+)
+
+const uprMutationExtraLen = 30
+const uprDeletetionExtraLen = 18
+const uprDeletetionWithDeletionTimeExtraLen = 21
+const uprSnapshotExtraLen = 20
+const bufferAckThreshold = 0.2
+const opaqueOpen = 0xBEAF0001
+const opaqueFailover = 0xDEADBEEF
+const uprDefaultNoopInterval = 120
+
+// Counter on top of opaqueOpen that others can draw from for open and control msgs
+var opaqueOpenCtrlWell uint32 = opaqueOpen
+
+// UprEvent memcached events for UPR streams.
+type UprEvent struct {
+ Opcode gomemcached.CommandCode // Type of event
+ Status gomemcached.Status // Response status
+ VBucket uint16 // VBucket this event applies to
+ DataType uint8 // data type
+ Opaque uint16 // 16 MSB of opaque
+ VBuuid uint64 // This field is set by downstream
+ Flags uint32 // Item flags
+ Expiry uint32 // Item expiration time
+ Key, Value []byte // Item key/value
+ OldValue []byte // TODO: TBD: old document value
+ Cas uint64 // CAS value of the item
+ Seqno uint64 // sequence number of the mutation
+ RevSeqno uint64 // rev sequence number : deletions
+ LockTime uint32 // Lock time
+ MetadataSize uint16 // Metadata size
+ SnapstartSeq uint64 // start sequence number of this snapshot
+ SnapendSeq uint64 // End sequence number of the snapshot
+ SnapshotType uint32 // 0: disk 1: memory
+ FailoverLog *FailoverLog // Failover log containing vvuid and sequnce number
+ Error error // Error value in case of a failure
+ ExtMeta []byte
+ AckSize uint32 // The number of bytes that can be Acked to DCP
+}
+
+type PriorityType string
+
+// high > medium > disabled > low
+const (
+ PriorityDisabled PriorityType = ""
+ PriorityLow PriorityType = "low"
+ PriorityMed PriorityType = "medium"
+ PriorityHigh PriorityType = "high"
+)
+
+// UprStream is per stream data structure over an UPR Connection.
+type UprStream struct {
+ Vbucket uint16 // Vbucket id
+ Vbuuid uint64 // vbucket uuid
+ StartSeq uint64 // start sequence number
+ EndSeq uint64 // end sequence number
+ connected bool
+}
+
+type FeedState int
+
+const (
+ FeedStateInitial = iota
+ FeedStateOpened = iota
+ FeedStateClosed = iota
+)
+
+func (fs FeedState) String() string {
+ switch fs {
+ case FeedStateInitial:
+ return "Initial"
+ case FeedStateOpened:
+ return "Opened"
+ case FeedStateClosed:
+ return "Closed"
+ default:
+ return "Unknown"
+ }
+}
+
+const (
+ CompressionTypeStartMarker = iota // also means invalid
+ CompressionTypeNone = iota
+ CompressionTypeSnappy = iota
+ CompressionTypeEndMarker = iota // also means invalid
+)
+
+// kv_engine/include/mcbp/protocol/datatype.h
+const (
+ JSONDataType uint8 = 1
+ SnappyDataType uint8 = 2
+ XattrDataType uint8 = 4
+)
+
+type UprFeatures struct {
+ Xattribute bool
+ CompressionType int
+ IncludeDeletionTime bool
+ DcpPriority PriorityType
+ EnableExpiry bool
+}
+
+/**
+ * Used to handle multiple concurrent calls UprRequestStream() by UprFeed clients
+ * It is expected that a client that calls UprRequestStream() more than once should issue
+ * different "opaque" (version) numbers
+ */
+type opaqueStreamMap map[uint16]*UprStream // opaque -> stream
+
+type vbStreamNegotiator struct {
+ vbHandshakeMap map[uint16]opaqueStreamMap // vbno -> opaqueStreamMap
+ mutex sync.RWMutex
+}
+
+func (negotiator *vbStreamNegotiator) initialize() {
+ negotiator.mutex.Lock()
+ negotiator.vbHandshakeMap = make(map[uint16]opaqueStreamMap)
+ negotiator.mutex.Unlock()
+}
+
+func (negotiator *vbStreamNegotiator) registerRequest(vbno, opaque uint16, vbuuid, startSequence, endSequence uint64) {
+ negotiator.mutex.Lock()
+ defer negotiator.mutex.Unlock()
+
+ var osMap opaqueStreamMap
+ var ok bool
+ if osMap, ok = negotiator.vbHandshakeMap[vbno]; !ok {
+ osMap = make(opaqueStreamMap)
+ negotiator.vbHandshakeMap[vbno] = osMap
+ }
+
+ if _, ok = osMap[opaque]; !ok {
+ osMap[opaque] = &UprStream{
+ Vbucket: vbno,
+ Vbuuid: vbuuid,
+ StartSeq: startSequence,
+ EndSeq: endSequence,
+ }
+ }
+}
+
+func (negotiator *vbStreamNegotiator) getStreamsCntFromMap(vbno uint16) int {
+ negotiator.mutex.RLock()
+ defer negotiator.mutex.RUnlock()
+
+ osmap, ok := negotiator.vbHandshakeMap[vbno]
+ if !ok {
+ return 0
+ } else {
+ return len(osmap)
+ }
+}
+
+func (negotiator *vbStreamNegotiator) getStreamFromMap(vbno, opaque uint16) (*UprStream, error) {
+ negotiator.mutex.RLock()
+ defer negotiator.mutex.RUnlock()
+
+ osmap, ok := negotiator.vbHandshakeMap[vbno]
+ if !ok {
+ return nil, fmt.Errorf("Error: stream for vb: %v does not exist", vbno)
+ }
+
+ stream, ok := osmap[opaque]
+ if !ok {
+ return nil, fmt.Errorf("Error: stream for vb: %v opaque: %v does not exist", vbno, opaque)
+ }
+ return stream, nil
+}
+
+func (negotiator *vbStreamNegotiator) deleteStreamFromMap(vbno, opaque uint16) {
+ negotiator.mutex.Lock()
+ defer negotiator.mutex.Unlock()
+
+ osmap, ok := negotiator.vbHandshakeMap[vbno]
+ if !ok {
+ return
+ }
+
+ delete(osmap, opaque)
+ if len(osmap) == 0 {
+ delete(negotiator.vbHandshakeMap, vbno)
+ }
+}
+
+// handleStreamRequest processes a UPR_STREAMREQ response for a stream still
+// under negotiation. On SUCCESS the stream is promoted to the feed's live
+// vbstreams map; on ROLLBACK or error the pending entry is discarded. The
+// returned UprEvent (possibly nil) is what gets surfaced to the feed client.
+func (negotiator *vbStreamNegotiator) handleStreamRequest(feed *UprFeed,
+	headerBuf [gomemcached.HDR_LEN]byte, pktPtr *gomemcached.MCRequest, bytesReceivedFromDCP int,
+	response *gomemcached.MCResponse) (*UprEvent, error) {
+	var event *UprEvent
+
+	if feed == nil || response == nil || pktPtr == nil {
+		return nil, errors.New("Invalid inputs")
+	}
+
+	// Get Stream from negotiator map
+	vbno := vbOpaque(response.Opaque)
+	opaque := appOpaque(response.Opaque)
+
+	stream, err := negotiator.getStreamFromMap(vbno, opaque)
+	if err != nil {
+		err = fmt.Errorf("Stream not found for vb %d: %#v", vbno, *pktPtr)
+		logging.Errorf(err.Error())
+		return nil, err
+	}
+
+	// Note: this calls the package-level handleStreamRequest helper.
+	status, rb, flog, err := handleStreamRequest(response, headerBuf[:])
+
+	if status == gomemcached.ROLLBACK {
+		event = makeUprEvent(*pktPtr, stream, bytesReceivedFromDCP)
+		event.Status = status
+		// rollback stream
+		logging.Infof("UPR_STREAMREQ with rollback %d for vb %d Failed: %v", rb, vbno, err)
+		negotiator.deleteStreamFromMap(vbno, opaque)
+	} else if status == gomemcached.SUCCESS {
+		event = makeUprEvent(*pktPtr, stream, bytesReceivedFromDCP)
+		event.Seqno = stream.StartSeq
+		event.FailoverLog = flog
+		event.Status = status
+		feed.activateStream(vbno, opaque, stream)
+		feed.negotiator.deleteStreamFromMap(vbno, opaque)
+		logging.Infof("UPR_STREAMREQ for vb %d successful", vbno)
+
+	} else if err != nil {
+		// fixed log-message typo: "erro" -> "error"
+		logging.Errorf("UPR_STREAMREQ for vbucket %d error %s", vbno, err.Error())
+		event = &UprEvent{
+			Opcode:  gomemcached.UPR_STREAMREQ,
+			Status:  status,
+			VBucket: vbno,
+			Error:   err,
+		}
+		negotiator.deleteStreamFromMap(vbno, opaque)
+	}
+	return event, nil
+}
+
+func (negotiator *vbStreamNegotiator) cleanUpVbStreams(vbno uint16) {
+ negotiator.mutex.Lock()
+ defer negotiator.mutex.Unlock()
+
+ delete(negotiator.vbHandshakeMap, vbno)
+}
+
+// UprFeed represents an UPR feed. A feed contains a connection to a single
+// host and multiple vBuckets
+type UprFeed struct {
+ // lock for feed.vbstreams
+ muVbstreams sync.RWMutex
+ C <-chan *UprEvent // Exported channel for receiving UPR events
+ negotiator vbStreamNegotiator // Used for pre-vbstreams, concurrent vb stream negotiation
+ vbstreams map[uint16]*UprStream // official live vb->stream mapping
+ closer chan bool // closer
+ conn *Client // connection to UPR producer
+ Error error // error
+ bytesRead uint64 // total bytes read on this connection
+ toAckBytes uint32 // bytes client has read
+ maxAckBytes uint32 // Max buffer control ack bytes
+ stats UprStats // Stats for upr client
+ transmitCh chan *gomemcached.MCRequest // transmit command channel
+ transmitCl chan bool // closer channel for transmit go-routine
+ // if flag is true, upr feed will use ack from client to determine whether/when to send ack to DCP
+ // if flag is false, upr feed will track how many bytes it has sent to client
+ // and use that to determine whether/when to send ack to DCP
+ ackByClient bool
+ feedState FeedState
+ muFeedState sync.RWMutex
+}
+
+// Exported interface - to allow for mocking
+type UprFeedIface interface {
+ Close()
+ Closed() bool
+ CloseStream(vbno, opaqueMSB uint16) error
+ GetError() error
+ GetUprStats() *UprStats
+ ClientAck(event *UprEvent) error
+ GetUprEventCh() <-chan *UprEvent
+ StartFeed() error
+ StartFeedWithConfig(datachan_len int) error
+ UprOpen(name string, sequence uint32, bufSize uint32) error
+ UprOpenWithXATTR(name string, sequence uint32, bufSize uint32) error
+ UprOpenWithFeatures(name string, sequence uint32, bufSize uint32, features UprFeatures) (error, UprFeatures)
+ UprRequestStream(vbno, opaqueMSB uint16, flags uint32, vuuid, startSequence, endSequence, snapStart, snapEnd uint64) error
+ // Set DCP priority on an existing DCP connection. The command is sent asynchronously without waiting for a response
+ SetPriorityAsync(p PriorityType) error
+}
+
+type UprStats struct {
+ TotalBytes uint64
+ TotalMutation uint64
+ TotalBufferAckSent uint64
+ TotalSnapShot uint64
+}
+
+// FailoverLog containing vvuid and sequnce number
+type FailoverLog [][2]uint64
+
+// error codes
+var ErrorInvalidLog = errors.New("couchbase.errorInvalidLog")
+
+func (flogp *FailoverLog) Latest() (vbuuid, seqno uint64, err error) {
+ if flogp != nil {
+ flog := *flogp
+ latest := flog[len(flog)-1]
+ return latest[0], latest[1], nil
+ }
+ return vbuuid, seqno, ErrorInvalidLog
+}
+
+// makeUprEvent maps a raw DCP request packet onto an UprEvent, decoding the
+// opcode-specific extras (seqno, rev seqno, flags, expiry, snapshot range).
+func makeUprEvent(rq gomemcached.MCRequest, stream *UprStream, bytesReceivedFromDCP int) *UprEvent {
+	event := &UprEvent{
+		Opcode:   rq.Opcode,
+		VBucket:  stream.Vbucket,
+		VBuuid:   stream.Vbuuid,
+		Key:      rq.Key,
+		Value:    rq.Body,
+		Cas:      rq.Cas,
+		ExtMeta:  rq.ExtMeta,
+		DataType: rq.DataType,
+	}
+
+	// set AckSize for events that need to be acked to DCP,
+	// i.e., events with CommandCodes that need to be buffered in DCP
+	if _, ok := gomemcached.BufferedCommandCodeMap[rq.Opcode]; ok {
+		event.AckSize = uint32(bytesReceivedFromDCP)
+	}
+
+	// 16 LSBits are used by client library to encode vbucket number.
+	// 16 MSBits are left for application to multiplex on opaque value.
+	event.Opaque = appOpaque(rq.Opaque)
+
+	if len(rq.Extras) >= uprMutationExtraLen &&
+		event.Opcode == gomemcached.UPR_MUTATION {
+
+		event.Seqno = binary.BigEndian.Uint64(rq.Extras[:8])
+		event.RevSeqno = binary.BigEndian.Uint64(rq.Extras[8:16])
+		event.Flags = binary.BigEndian.Uint32(rq.Extras[16:20])
+		event.Expiry = binary.BigEndian.Uint32(rq.Extras[20:24])
+		event.LockTime = binary.BigEndian.Uint32(rq.Extras[24:28])
+		event.MetadataSize = binary.BigEndian.Uint16(rq.Extras[28:30])
+
+	} else if len(rq.Extras) >= uprDeletetionWithDeletionTimeExtraLen &&
+		event.Opcode == gomemcached.UPR_DELETION {
+
+		event.Seqno = binary.BigEndian.Uint64(rq.Extras[:8])
+		event.RevSeqno = binary.BigEndian.Uint64(rq.Extras[8:16])
+		event.Expiry = binary.BigEndian.Uint32(rq.Extras[16:20])
+
+	} else if len(rq.Extras) >= uprDeletetionExtraLen &&
+		(event.Opcode == gomemcached.UPR_DELETION ||
+			event.Opcode == gomemcached.UPR_EXPIRATION) {
+
+		// NOTE: the length check is parenthesized to apply to BOTH opcodes;
+		// previously && bound tighter than ||, so a UPR_EXPIRATION packet
+		// with short extras entered this branch and panicked on indexing.
+		event.Seqno = binary.BigEndian.Uint64(rq.Extras[:8])
+		event.RevSeqno = binary.BigEndian.Uint64(rq.Extras[8:16])
+		event.MetadataSize = binary.BigEndian.Uint16(rq.Extras[16:18])
+
+	} else if len(rq.Extras) >= uprSnapshotExtraLen &&
+		event.Opcode == gomemcached.UPR_SNAPSHOT {
+
+		event.SnapstartSeq = binary.BigEndian.Uint64(rq.Extras[:8])
+		event.SnapendSeq = binary.BigEndian.Uint64(rq.Extras[8:16])
+		event.SnapshotType = binary.BigEndian.Uint32(rq.Extras[16:20])
+	}
+
+	return event
+}
+
+func (event *UprEvent) String() string {
+ name := gomemcached.CommandNames[event.Opcode]
+ if name == "" {
+ name = fmt.Sprintf("#%d", event.Opcode)
+ }
+ return name
+}
+
+func (event *UprEvent) IsSnappyDataType() bool {
+ return event.Opcode == gomemcached.UPR_MUTATION && (event.DataType&SnappyDataType > 0)
+}
+
+// sendCommands drains the feed's transmit channel, writing each request to
+// the UPR connection. A transmit failure closes the whole feed (which also
+// makes runFeed exit); a signal on transmitCl shuts the loop down cleanly.
+func (feed *UprFeed) sendCommands(mc *Client) {
+	transmitCh := feed.transmitCh
+	transmitCl := feed.transmitCl
+loop:
+	for {
+		select {
+		case command := <-transmitCh:
+			if err := mc.Transmit(command); err != nil {
+				logging.Errorf("Failed to transmit command %s. Error %s", command.Opcode.String(), err.Error())
+				// get feed to close and runFeed routine to exit
+				feed.Close()
+				break loop
+			}
+
+		case <-transmitCl:
+			break loop
+		}
+	}
+
+	// After sendCommands exits, write to transmitCh will block forever
+	// when we write to transmitCh, e.g., at CloseStream(), we need to check feed closure to have an exit route
+
+	logging.Infof("sendCommands exiting")
+}
+
+// Sets the specified stream as the connected stream for this vbno, and also cleans up negotiator
+func (feed *UprFeed) activateStream(vbno, opaque uint16, stream *UprStream) error {
+ feed.muVbstreams.Lock()
+ defer feed.muVbstreams.Unlock()
+
+ // Set this stream as the officially connected stream for this vb
+ stream.connected = true
+ feed.vbstreams[vbno] = stream
+ return nil
+}
+
+func (feed *UprFeed) cleanUpVbStream(vbno uint16) {
+ feed.muVbstreams.Lock()
+ defer feed.muVbstreams.Unlock()
+
+ delete(feed.vbstreams, vbno)
+}
+
+// NewUprFeed creates a new UPR Feed.
+// TODO: Describe side-effects on bucket instance and its connection pool.
+func (mc *Client) NewUprFeed() (*UprFeed, error) {
+ return mc.NewUprFeedWithConfig(false /*ackByClient*/)
+}
+
+func (mc *Client) NewUprFeedWithConfig(ackByClient bool) (*UprFeed, error) {
+
+ feed := &UprFeed{
+ conn: mc,
+ closer: make(chan bool, 1),
+ vbstreams: make(map[uint16]*UprStream),
+ transmitCh: make(chan *gomemcached.MCRequest),
+ transmitCl: make(chan bool),
+ ackByClient: ackByClient,
+ }
+
+ feed.negotiator.initialize()
+
+ go feed.sendCommands(mc)
+ return feed, nil
+}
+
+func (mc *Client) NewUprFeedIface() (UprFeedIface, error) {
+ return mc.NewUprFeed()
+}
+
+func (mc *Client) NewUprFeedWithConfigIface(ackByClient bool) (UprFeedIface, error) {
+ return mc.NewUprFeedWithConfig(ackByClient)
+}
+
+func doUprOpen(mc *Client, name string, sequence uint32, features UprFeatures) error {
+ rq := &gomemcached.MCRequest{
+ Opcode: gomemcached.UPR_OPEN,
+ Key: []byte(name),
+ Opaque: getUprOpenCtrlOpaque(),
+ }
+
+ rq.Extras = make([]byte, 8)
+ binary.BigEndian.PutUint32(rq.Extras[:4], sequence)
+
+ // opens a producer type connection
+ flags := gomemcached.DCP_PRODUCER
+ if features.Xattribute {
+ flags = flags | gomemcached.DCP_OPEN_INCLUDE_XATTRS
+ }
+ if features.IncludeDeletionTime {
+ flags = flags | gomemcached.DCP_OPEN_INCLUDE_DELETE_TIMES
+ }
+ binary.BigEndian.PutUint32(rq.Extras[4:], flags)
+
+ return sendMcRequestSync(mc, rq)
+}
+
+// sendMcRequestSync transmits req and blocks for the matching response,
+// verifying that the response opcode and opaque echo the request and that
+// the status is SUCCESS.
+func sendMcRequestSync(mc *Client, req *gomemcached.MCRequest) error {
+	if err := mc.Transmit(req); err != nil {
+		return err
+	}
+
+	if res, err := mc.Receive(); err != nil {
+		return err
+	} else if req.Opcode != res.Opcode {
+		// report the mismatching opcode (the original printed res.Opaque here)
+		return fmt.Errorf("unexpected #opcode sent %v received %v", req.Opcode, res.Opcode)
+	} else if req.Opaque != res.Opaque {
+		return fmt.Errorf("opaque mismatch, sent %v received %v", req.Opaque, res.Opaque)
+	} else if res.Status != gomemcached.SUCCESS {
+		return fmt.Errorf("error %v", res.Status)
+	}
+	return nil
+}
+
+// UprOpen to connect with a UPR producer.
+// Name: name of te UPR connection
+// sequence: sequence number for the connection
+// bufsize: max size of the application
+func (feed *UprFeed) UprOpen(name string, sequence uint32, bufSize uint32) error {
+ var allFeaturesDisabled UprFeatures
+ err, _ := feed.uprOpen(name, sequence, bufSize, allFeaturesDisabled)
+ return err
+}
+
+// UprOpen with XATTR enabled.
+func (feed *UprFeed) UprOpenWithXATTR(name string, sequence uint32, bufSize uint32) error {
+ var onlyXattrEnabled UprFeatures
+ onlyXattrEnabled.Xattribute = true
+ err, _ := feed.uprOpen(name, sequence, bufSize, onlyXattrEnabled)
+ return err
+}
+
+func (feed *UprFeed) UprOpenWithFeatures(name string, sequence uint32, bufSize uint32, features UprFeatures) (error, UprFeatures) {
+ return feed.uprOpen(name, sequence, bufSize, features)
+}
+
+func (feed *UprFeed) SetPriorityAsync(p PriorityType) error {
+ if !feed.isOpen() {
+ // do not send this command if upr feed is not yet open, otherwise it may interfere with
+ // feed start up process, which relies on synchronous message exchange with DCP.
+ return fmt.Errorf("Upr feed is not open. State=%v", feed.getState())
+ }
+
+ return feed.setPriority(p, false /*sync*/)
+}
+
+func (feed *UprFeed) setPriority(p PriorityType, sync bool) error {
+ rq := &gomemcached.MCRequest{
+ Opcode: gomemcached.UPR_CONTROL,
+ Key: []byte("set_priority"),
+ Body: []byte(p),
+ Opaque: getUprOpenCtrlOpaque(),
+ }
+ if sync {
+ return sendMcRequestSync(feed.conn, rq)
+ } else {
+ return feed.writeToTransmitCh(rq)
+
+ }
+}
+
+// uprOpen performs the full DCP connection handshake: UPR_OPEN followed by
+// a series of synchronous UPR_CONTROL messages (buffer size, noop interval,
+// compression, priority, expiry). On the first failure the method returns,
+// with activatedFeatures reporting what was actually negotiated so far.
+func (feed *UprFeed) uprOpen(name string, sequence uint32, bufSize uint32, features UprFeatures) (err error, activatedFeatures UprFeatures) {
+	mc := feed.conn
+
+	// First set this to an invalid value to state that the method hasn't gotten to executing this control yet
+	activatedFeatures.CompressionType = CompressionTypeEndMarker
+
+	if err = doUprOpen(mc, name, sequence, features); err != nil {
+		return
+	}
+
+	activatedFeatures.Xattribute = features.Xattribute
+
+	// send a UPR control message to set the window size for the this connection
+	if bufSize > 0 {
+		rq := &gomemcached.MCRequest{
+			Opcode: gomemcached.UPR_CONTROL,
+			Key:    []byte("connection_buffer_size"),
+			Body:   []byte(strconv.Itoa(int(bufSize))),
+			Opaque: getUprOpenCtrlOpaque(),
+		}
+		err = sendMcRequestSync(feed.conn, rq)
+		if err != nil {
+			return
+		}
+		// ack back to the producer once this fraction of the buffer is consumed
+		feed.maxAckBytes = uint32(bufferAckThreshold * float32(bufSize))
+	}
+
+	// enable noop and set noop interval
+	rq := &gomemcached.MCRequest{
+		Opcode: gomemcached.UPR_CONTROL,
+		Key:    []byte("enable_noop"),
+		Body:   []byte("true"),
+		Opaque: getUprOpenCtrlOpaque(),
+	}
+	err = sendMcRequestSync(feed.conn, rq)
+	if err != nil {
+		return
+	}
+
+	rq = &gomemcached.MCRequest{
+		Opcode: gomemcached.UPR_CONTROL,
+		Key:    []byte("set_noop_interval"),
+		Body:   []byte(strconv.Itoa(int(uprDefaultNoopInterval))),
+		Opaque: getUprOpenCtrlOpaque(),
+	}
+	err = sendMcRequestSync(feed.conn, rq)
+	if err != nil {
+		return
+	}
+
+	if features.CompressionType == CompressionTypeSnappy {
+		// until the control below succeeds, report compression as disabled
+		activatedFeatures.CompressionType = CompressionTypeNone
+		rq = &gomemcached.MCRequest{
+			Opcode: gomemcached.UPR_CONTROL,
+			Key:    []byte("force_value_compression"),
+			Body:   []byte("true"),
+			Opaque: getUprOpenCtrlOpaque(),
+		}
+		err = sendMcRequestSync(feed.conn, rq)
+	} else if features.CompressionType == CompressionTypeEndMarker {
+		err = fmt.Errorf("UPR_CONTROL Failed - Invalid CompressionType: %v", features.CompressionType)
+	}
+	if err != nil {
+		return
+	}
+	activatedFeatures.CompressionType = features.CompressionType
+
+	if features.DcpPriority != PriorityDisabled {
+		err = feed.setPriority(features.DcpPriority, true /*sync*/)
+		if err == nil {
+			activatedFeatures.DcpPriority = features.DcpPriority
+		} else {
+			return
+		}
+	}
+
+	if features.EnableExpiry {
+		rq := &gomemcached.MCRequest{
+			Opcode: gomemcached.UPR_CONTROL,
+			Key:    []byte("enable_expiry_opcode"),
+			Body:   []byte("true"),
+			Opaque: getUprOpenCtrlOpaque(),
+		}
+		err = sendMcRequestSync(feed.conn, rq)
+		if err != nil {
+			return
+		}
+		activatedFeatures.EnableExpiry = true
+	}
+
+	// everything is ok so far, set upr feed to open state
+	feed.setOpen()
+	return
+}
+
+// UprGetFailoverLog fetches the failover log for each vbucket in vb. It
+// opens a dedicated "FailoverLog" DCP connection on mc and issues one
+// UPR_FAILOVERLOG request per vbucket.
+func (mc *Client) UprGetFailoverLog(
+	vb []uint16) (map[uint16]*FailoverLog, error) {
+
+	rq := &gomemcached.MCRequest{
+		Opcode: gomemcached.UPR_FAILOVERLOG,
+		Opaque: opaqueFailover,
+	}
+
+	var allFeaturesDisabled UprFeatures
+	if err := doUprOpen(mc, "FailoverLog", 0, allFeaturesDisabled); err != nil {
+		return nil, fmt.Errorf("UPR_OPEN Failed %s", err.Error())
+	}
+
+	failoverLogs := make(map[uint16]*FailoverLog)
+	for _, vBucket := range vb {
+		rq.VBucket = vBucket
+		if err := mc.Transmit(rq); err != nil {
+			return nil, err
+		}
+		res, err := mc.Receive()
+
+		if err != nil {
+			return nil, fmt.Errorf("failed to receive %s", err.Error())
+		} else if res.Opcode != gomemcached.UPR_FAILOVERLOG || res.Status != gomemcached.SUCCESS {
+			return nil, fmt.Errorf("unexpected #opcode %v", res.Opcode)
+		}
+
+		flog, err := parseFailoverLog(res.Body)
+		if err != nil {
+			// report the failing vbucket, not the whole slice
+			// (%d on a []uint16 was a go vet error in the original)
+			return nil, fmt.Errorf("unable to parse failover logs for vb %d", vBucket)
+		}
+		failoverLogs[vBucket] = flog
+	}
+
+	return failoverLogs, nil
+}
+
+// UprRequestStream for a single vbucket.
+func (feed *UprFeed) UprRequestStream(vbno, opaqueMSB uint16, flags uint32,
+ vuuid, startSequence, endSequence, snapStart, snapEnd uint64) error {
+
+ rq := &gomemcached.MCRequest{
+ Opcode: gomemcached.UPR_STREAMREQ,
+ VBucket: vbno,
+ Opaque: composeOpaque(vbno, opaqueMSB),
+ }
+
+ rq.Extras = make([]byte, 48) // #Extras
+ binary.BigEndian.PutUint32(rq.Extras[:4], flags)
+ binary.BigEndian.PutUint32(rq.Extras[4:8], uint32(0))
+ binary.BigEndian.PutUint64(rq.Extras[8:16], startSequence)
+ binary.BigEndian.PutUint64(rq.Extras[16:24], endSequence)
+ binary.BigEndian.PutUint64(rq.Extras[24:32], vuuid)
+ binary.BigEndian.PutUint64(rq.Extras[32:40], snapStart)
+ binary.BigEndian.PutUint64(rq.Extras[40:48], snapEnd)
+
+ feed.negotiator.registerRequest(vbno, opaqueMSB, vuuid, startSequence, endSequence)
+ // Any client that has ever called this method, regardless of return code,
+ // should expect a potential UPR_CLOSESTREAM message due to this new map entry prior to Transmit.
+
+ if err := feed.conn.Transmit(rq); err != nil {
+ logging.Errorf("Error in StreamRequest %s", err.Error())
+ // If an error occurs during transmit, then the UPRFeed will keep the stream
+ // in the vbstreams map. This is to prevent nil lookup from any previously
+ // sent stream requests.
+ return err
+ }
+
+ return nil
+}
+
+// CloseStream for specified vbucket.
+func (feed *UprFeed) CloseStream(vbno, opaqueMSB uint16) error {
+
+ err := feed.validateCloseStream(vbno)
+ if err != nil {
+ logging.Infof("CloseStream for %v has been skipped because of error %v", vbno, err)
+ return err
+ }
+
+ closeStream := &gomemcached.MCRequest{
+ Opcode: gomemcached.UPR_CLOSESTREAM,
+ VBucket: vbno,
+ Opaque: composeOpaque(vbno, opaqueMSB),
+ }
+
+ feed.writeToTransmitCh(closeStream)
+
+ return nil
+}
+
// GetUprEventCh returns the receive-only channel on which the feed
// delivers UprEvents (allocated by StartFeedWithConfig).
func (feed *UprFeed) GetUprEventCh() <-chan *UprEvent {
	return feed.C
}
+
// GetError returns the error that stopped the feed, if any; runFeed sets
// feed.Error when a packet receive fails.
func (feed *UprFeed) GetError() error {
	return feed.Error
}
+
+func (feed *UprFeed) validateCloseStream(vbno uint16) error {
+ feed.muVbstreams.RLock()
+ nilVbStream := feed.vbstreams[vbno] == nil
+ feed.muVbstreams.RUnlock()
+
+ if nilVbStream && (feed.negotiator.getStreamsCntFromMap(vbno) == 0) {
+ return fmt.Errorf("Stream for vb %d has not been requested", vbno)
+ }
+
+ return nil
+}
+
+func (feed *UprFeed) writeToTransmitCh(rq *gomemcached.MCRequest) error {
+ // write to transmitCh may block forever if sendCommands has exited
+ // check for feed closure to have an exit route in this case
+ select {
+ case <-feed.closer:
+ errMsg := fmt.Sprintf("Abort sending request to transmitCh because feed has been closed. request=%v", rq)
+ logging.Infof(errMsg)
+ return errors.New(errMsg)
+ case feed.transmitCh <- rq:
+ }
+ return nil
+}
+
// StartFeed to start the upper feed.
// Convenience wrapper around StartFeedWithConfig using a default event
// channel buffer of 10.
func (feed *UprFeed) StartFeed() error {
	return feed.StartFeedWithConfig(10)
}
+
+func (feed *UprFeed) StartFeedWithConfig(datachan_len int) error {
+ ch := make(chan *UprEvent, datachan_len)
+ feed.C = ch
+ go feed.runFeed(ch)
+ return nil
+}
+
+func parseFailoverLog(body []byte) (*FailoverLog, error) {
+
+ if len(body)%16 != 0 {
+ err := fmt.Errorf("invalid body length %v, in failover-log", len(body))
+ return nil, err
+ }
+ log := make(FailoverLog, len(body)/16)
+ for i, j := 0, 0; i < len(body); i += 16 {
+ vuuid := binary.BigEndian.Uint64(body[i : i+8])
+ seqno := binary.BigEndian.Uint64(body[i+8 : i+16])
+ log[j] = [2]uint64{vuuid, seqno}
+ j++
+ }
+ return &log, nil
+}
+
+func handleStreamRequest(
+ res *gomemcached.MCResponse,
+ headerBuf []byte,
+) (gomemcached.Status, uint64, *FailoverLog, error) {
+
+ var rollback uint64
+ var err error
+
+ switch {
+ case res.Status == gomemcached.ROLLBACK:
+ logging.Infof("Rollback response. body=%v, headerBuf=%v\n", res.Body, headerBuf)
+ rollback = binary.BigEndian.Uint64(res.Body)
+ logging.Infof("Rollback seqno is %v for response with opaque %v\n", rollback, res.Opaque)
+ return res.Status, rollback, nil, nil
+
+ case res.Status != gomemcached.SUCCESS:
+ err = fmt.Errorf("unexpected status %v for response with opaque %v", res.Status, res.Opaque)
+ return res.Status, 0, nil, err
+ }
+
+ flog, err := parseFailoverLog(res.Body[:])
+ return res.Status, rollback, flog, err
+}
+
+// generate stream end responses for all active vb streams
+func (feed *UprFeed) doStreamClose(ch chan *UprEvent) {
+ feed.muVbstreams.RLock()
+
+ uprEvents := make([]*UprEvent, len(feed.vbstreams))
+ index := 0
+ for vbno, stream := range feed.vbstreams {
+ uprEvent := &UprEvent{
+ VBucket: vbno,
+ VBuuid: stream.Vbuuid,
+ Opcode: gomemcached.UPR_STREAMEND,
+ }
+ uprEvents[index] = uprEvent
+ index++
+ }
+
+ // release the lock before sending uprEvents to ch, which may block
+ feed.muVbstreams.RUnlock()
+
+loop:
+ for _, uprEvent := range uprEvents {
+ select {
+ case ch <- uprEvent:
+ case <-feed.closer:
+ logging.Infof("Feed has been closed. Aborting doStreamClose.")
+ break loop
+ }
+ }
+}
+
// runFeed is the feed's reader goroutine: it hijacks the underlying
// connection, receives UPR packets in a loop, converts them into UprEvents
// delivered on ch, maintains per-vbucket stream bookkeeping, and (when the
// client does not ack) drives flow-control buffer acks. It exits when the
// feed is closed or a receive fails; ch is closed on exit.
func (feed *UprFeed) runFeed(ch chan *UprEvent) {
	defer close(ch)
	var headerBuf [gomemcached.HDR_LEN]byte
	var pkt gomemcached.MCRequest
	var event *UprEvent

	mc := feed.conn.Hijack()
	uprStats := &feed.stats

loop:
	for {
		select {
		case <-feed.closer:
			logging.Infof("Feed has been closed. Exiting.")
			break loop
		default:
			bytes, err := pkt.Receive(mc, headerBuf[:])
			if err != nil {
				logging.Errorf("Error in receive %s", err.Error())
				feed.Error = err
				// send all the stream close messages to the client
				feed.doStreamClose(ch)
				break loop
			} else {
				event = nil
				// The producer streams requests at us; repackage the packet
				// as a response. On the wire the VBucket field of these
				// server-initiated packets doubles as the status code.
				res := &gomemcached.MCResponse{
					Opcode: pkt.Opcode,
					Cas:    pkt.Cas,
					Opaque: pkt.Opaque,
					Status: gomemcached.Status(pkt.VBucket),
					Extras: pkt.Extras,
					Key:    pkt.Key,
					Body:   pkt.Body,
				}

				// The 32-bit opaque packs (app opaque << 16 | vbucket).
				vb := vbOpaque(pkt.Opaque)
				appOpaque := appOpaque(pkt.Opaque)
				// NOTE(review): plain assignment, not "+=" — TotalBytes
				// records the size of the most recent packet rather than a
				// running total; confirm this is intended.
				uprStats.TotalBytes = uint64(bytes)

				feed.muVbstreams.RLock()
				stream := feed.vbstreams[vb]
				feed.muVbstreams.RUnlock()

				switch pkt.Opcode {
				case gomemcached.UPR_STREAMREQ:
					event, err = feed.negotiator.handleStreamRequest(feed, headerBuf, &pkt, bytes, res)
					if err != nil {
						logging.Infof(err.Error())
						break loop
					}
				case gomemcached.UPR_MUTATION,
					gomemcached.UPR_DELETION,
					gomemcached.UPR_EXPIRATION:
					// Data events require a known stream; a packet for an
					// unknown vbucket aborts the feed.
					if stream == nil {
						logging.Infof("Stream not found for vb %d: %#v", vb, pkt)
						break loop
					}
					event = makeUprEvent(pkt, stream, bytes)
					uprStats.TotalMutation++

				case gomemcached.UPR_STREAMEND:
					if stream == nil {
						logging.Infof("Stream not found for vb %d: %#v", vb, pkt)
						break loop
					}
					//stream has ended
					event = makeUprEvent(pkt, stream, bytes)
					logging.Infof("Stream Ended for vb %d", vb)

					feed.negotiator.deleteStreamFromMap(vb, appOpaque)
					feed.cleanUpVbStream(vb)

				case gomemcached.UPR_SNAPSHOT:
					if stream == nil {
						logging.Infof("Stream not found for vb %d: %#v", vb, pkt)
						break loop
					}
					// snapshot marker
					event = makeUprEvent(pkt, stream, bytes)
					uprStats.TotalSnapShot++

				case gomemcached.UPR_FLUSH:
					if stream == nil {
						logging.Infof("Stream not found for vb %d: %#v", vb, pkt)
						break loop
					}
					// special processing for flush ?
					event = makeUprEvent(pkt, stream, bytes)

				case gomemcached.UPR_CLOSESTREAM:
					if stream == nil {
						logging.Infof("Stream not found for vb %d: %#v", vb, pkt)
						break loop
					}
					event = makeUprEvent(pkt, stream, bytes)
					event.Opcode = gomemcached.UPR_STREAMEND // opcode re-write !!
					logging.Infof("Stream Closed for vb %d StreamEnd simulated", vb)

					feed.negotiator.deleteStreamFromMap(vb, appOpaque)
					feed.cleanUpVbStream(vb)

				case gomemcached.UPR_ADDSTREAM:
					logging.Infof("Opcode %v not implemented", pkt.Opcode)

				case gomemcached.UPR_CONTROL, gomemcached.UPR_BUFFERACK:
					if res.Status != gomemcached.SUCCESS {
						logging.Infof("Opcode %v received status %d", pkt.Opcode.String(), res.Status)
					}

				case gomemcached.UPR_NOOP:
					// send a NOOP back
					noop := &gomemcached.MCResponse{
						Opcode: gomemcached.UPR_NOOP,
						Opaque: pkt.Opaque,
					}

					if err := feed.conn.TransmitResponse(noop); err != nil {
						logging.Warnf("failed to transmit command %s. Error %s", noop.Opcode.String(), err.Error())
					}
				default:
					logging.Infof("Recived an unknown response for vbucket %d", vb)
				}
			}

			if event != nil {
				// Deliver the event, bailing out if the feed closes while
				// the client is not draining the channel.
				select {
				case ch <- event:
				case <-feed.closer:
					logging.Infof("Feed has been closed. Skip sending events. Exiting.")
					break loop
				}

				feed.muVbstreams.RLock()
				l := len(feed.vbstreams)
				feed.muVbstreams.RUnlock()

				if event.Opcode == gomemcached.UPR_CLOSESTREAM && l == 0 {
					logging.Infof("No more streams")
				}
			}

			if !feed.ackByClient {
				// if client does not ack, do the ack check now
				feed.sendBufferAckIfNeeded(event)
			}
		}
	}

	// make sure that feed is closed before we signal transmitCl and exit runFeed
	feed.Close()

	close(feed.transmitCl)
	logging.Infof("runFeed exiting")
}
+
+// Client, after completing processing of an UprEvent, need to call this API to notify UprFeed,
+// so that UprFeed can update its ack bytes stats and send ack to DCP if needed
+// Client needs to set ackByClient flag to true in NewUprFeedWithConfig() call as a prerequisite for this call to work
+// This API is not thread safe. Caller should NOT have more than one go rountine calling this API
+func (feed *UprFeed) ClientAck(event *UprEvent) error {
+ if !feed.ackByClient {
+ return errors.New("Upr feed does not have ackByclient flag set")
+ }
+ feed.sendBufferAckIfNeeded(event)
+ return nil
+}
+
+// increment ack bytes if the event needs to be acked to DCP
+// send buffer ack if enough ack bytes have been accumulated
+func (feed *UprFeed) sendBufferAckIfNeeded(event *UprEvent) {
+ if event == nil || event.AckSize == 0 {
+ // this indicates that there is no need to ack to DCP
+ return
+ }
+
+ totalBytes := feed.toAckBytes + event.AckSize
+ if totalBytes > feed.maxAckBytes {
+ feed.toAckBytes = 0
+ feed.sendBufferAck(totalBytes)
+ } else {
+ feed.toAckBytes = totalBytes
+ }
+}
+
+// send buffer ack to dcp
+func (feed *UprFeed) sendBufferAck(sendSize uint32) {
+ bufferAck := &gomemcached.MCRequest{
+ Opcode: gomemcached.UPR_BUFFERACK,
+ }
+ bufferAck.Extras = make([]byte, 4)
+ binary.BigEndian.PutUint32(bufferAck.Extras[:4], uint32(sendSize))
+ feed.writeToTransmitCh(bufferAck)
+ feed.stats.TotalBufferAckSent++
+}
+
// GetUprStats returns a pointer to the feed's live statistics counters;
// the fields continue to be updated by the feed while it runs.
func (feed *UprFeed) GetUprStats() *UprStats {
	return &feed.stats
}
+
// composeOpaque packs a (vbucket, application opaque) pair into one 32-bit
// opaque value: opaqueMSB occupies the high 16 bits, vbno the low 16 bits.
func composeOpaque(vbno, opaqueMSB uint16) uint32 {
	high := uint32(opaqueMSB) << 16
	return high | uint32(vbno)
}
+
// getUprOpenCtrlOpaque returns a fresh opaque for UPR_OPEN/UPR_CONTROL
// requests by atomically incrementing the shared package-level counter.
func getUprOpenCtrlOpaque() uint32 {
	return atomic.AddUint32(&opaqueOpenCtrlWell, 1)
}
+
// appOpaque extracts the application-supplied half (high 16 bits) of a
// composed opaque; the inverse of composeOpaque's opaqueMSB argument.
func appOpaque(opq32 uint32) uint16 {
	return uint16(opq32 >> 16)
}
+
// vbOpaque extracts the vbucket number (low 16 bits) of a composed opaque;
// the inverse of composeOpaque's vbno argument.
func vbOpaque(opq32 uint32) uint16 {
	// plain truncation keeps exactly the low 16 bits
	return uint16(opq32)
}
+
+// Close this UprFeed.
+func (feed *UprFeed) Close() {
+ feed.muFeedState.Lock()
+ defer feed.muFeedState.Unlock()
+ if feed.feedState != FeedStateClosed {
+ close(feed.closer)
+ feed.feedState = FeedStateClosed
+ feed.negotiator.initialize()
+ }
+}
+
// Closed reports whether the UprFeed has been closed (state read under
// the feed-state read lock).
func (feed *UprFeed) Closed() bool {
	feed.muFeedState.RLock()
	defer feed.muFeedState.RUnlock()
	return feed.feedState == FeedStateClosed
}
+
// setOpen marks the feed opened once initialization has completed,
// holding the feed-state write lock for the transition.
func (feed *UprFeed) setOpen() {
	feed.muFeedState.Lock()
	defer feed.muFeedState.Unlock()
	feed.feedState = FeedStateOpened
}
+
// isOpen reports whether the feed is in the opened state (read under the
// feed-state read lock).
func (feed *UprFeed) isOpen() bool {
	feed.muFeedState.RLock()
	defer feed.muFeedState.RUnlock()
	return feed.feedState == FeedStateOpened
}
+
// getState returns the feed's current lifecycle state (read under the
// feed-state read lock).
func (feed *UprFeed) getState() FeedState {
	feed.muFeedState.RLock()
	defer feed.muFeedState.RUnlock()
	return feed.feedState
}
diff --git a/vendor/github.com/couchbase/gomemcached/mc_constants.go b/vendor/github.com/couchbase/gomemcached/mc_constants.go
new file mode 100644
index 0000000..32f4f51
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/mc_constants.go
@@ -0,0 +1,345 @@
+// Package gomemcached is binary protocol packet formats and constants.
+package gomemcached
+
+import (
+ "fmt"
+)
+
const (
	// Magic bytes in the first header byte of every binary-protocol packet.
	REQ_MAGIC = 0x80 // client -> server request
	RES_MAGIC = 0x81 // server -> client response
)

// CommandCode for memcached packets.
type CommandCode uint8
+
const (
	// Core key-value and administrative commands.
	GET        = CommandCode(0x00)
	SET        = CommandCode(0x01)
	ADD        = CommandCode(0x02)
	REPLACE    = CommandCode(0x03)
	DELETE     = CommandCode(0x04)
	INCREMENT  = CommandCode(0x05)
	DECREMENT  = CommandCode(0x06)
	QUIT       = CommandCode(0x07)
	FLUSH      = CommandCode(0x08)
	GETQ       = CommandCode(0x09)
	NOOP       = CommandCode(0x0a)
	VERSION    = CommandCode(0x0b)
	GETK       = CommandCode(0x0c)
	GETKQ      = CommandCode(0x0d)
	APPEND     = CommandCode(0x0e)
	PREPEND    = CommandCode(0x0f)
	STAT       = CommandCode(0x10)
	// "Q" variants are quiet commands (see IsQuiet).
	SETQ       = CommandCode(0x11)
	ADDQ       = CommandCode(0x12)
	REPLACEQ   = CommandCode(0x13)
	DELETEQ    = CommandCode(0x14)
	INCREMENTQ = CommandCode(0x15)
	DECREMENTQ = CommandCode(0x16)
	QUITQ      = CommandCode(0x17)
	FLUSHQ     = CommandCode(0x18)
	APPENDQ    = CommandCode(0x19)
	AUDIT      = CommandCode(0x27)
	PREPENDQ   = CommandCode(0x1a)
	GAT        = CommandCode(0x1d)
	HELLO      = CommandCode(0x1f)
	// Range ("R") operations.
	RGET       = CommandCode(0x30)
	RSET       = CommandCode(0x31)
	RSETQ      = CommandCode(0x32)
	RAPPEND    = CommandCode(0x33)
	RAPPENDQ   = CommandCode(0x34)
	RPREPEND   = CommandCode(0x35)
	RPREPENDQ  = CommandCode(0x36)
	RDELETE    = CommandCode(0x37)
	RDELETEQ   = CommandCode(0x38)
	RINCR      = CommandCode(0x39)
	RINCRQ     = CommandCode(0x3a)
	RDECR      = CommandCode(0x3b)
	RDECRQ     = CommandCode(0x3c)

	// SASL authentication handshake.
	SASL_LIST_MECHS = CommandCode(0x20)
	SASL_AUTH       = CommandCode(0x21)
	SASL_STEP       = CommandCode(0x22)

	SET_VBUCKET = CommandCode(0x3d)

	TAP_CONNECT          = CommandCode(0x40) // Client-sent request to initiate Tap feed
	TAP_MUTATION         = CommandCode(0x41) // Notification of a SET/ADD/REPLACE/etc. on the server
	TAP_DELETE           = CommandCode(0x42) // Notification of a DELETE on the server
	TAP_FLUSH            = CommandCode(0x43) // Replicates a flush_all command
	TAP_OPAQUE           = CommandCode(0x44) // Opaque control data from the engine
	TAP_VBUCKET_SET      = CommandCode(0x45) // Sets state of vbucket in receiver (used in takeover)
	TAP_CHECKPOINT_START = CommandCode(0x46) // Notifies start of new checkpoint
	TAP_CHECKPOINT_END   = CommandCode(0x47) // Notifies end of checkpoint

	UPR_OPEN        = CommandCode(0x50) // Open a UPR connection with a name
	UPR_ADDSTREAM   = CommandCode(0x51) // Sent by ebucketMigrator to UPR Consumer
	UPR_CLOSESTREAM = CommandCode(0x52) // Sent by eBucketMigrator to UPR Consumer
	UPR_FAILOVERLOG = CommandCode(0x54) // Request failover logs
	UPR_STREAMREQ   = CommandCode(0x53) // Stream request from consumer to producer
	UPR_STREAMEND   = CommandCode(0x55) // Sent by producer when it has no more messages to stream
	UPR_SNAPSHOT    = CommandCode(0x56) // Start of a new snapshot
	UPR_MUTATION    = CommandCode(0x57) // Key mutation
	UPR_DELETION    = CommandCode(0x58) // Key deletion
	UPR_EXPIRATION  = CommandCode(0x59) // Key expiration
	UPR_FLUSH       = CommandCode(0x5a) // Delete all the data for a vbucket
	UPR_NOOP        = CommandCode(0x5c) // UPR NOOP
	UPR_BUFFERACK   = CommandCode(0x5d) // UPR Buffer Acknowledgement
	UPR_CONTROL     = CommandCode(0x5e) // Set flow control params

	SELECT_BUCKET = CommandCode(0x89) // Select bucket

	OBSERVE_SEQNO = CommandCode(0x91) // Sequence Number based Observe
	OBSERVE       = CommandCode(0x92)

	GET_META                 = CommandCode(0xA0) // Get meta. returns with expiry, flags, cas etc
	GET_COLLECTIONS_MANIFEST = CommandCode(0xba) // Get entire collections manifest.
	COLLECTIONS_GET_CID      = CommandCode(0xbb) // Get collection id.
	SUBDOC_GET               = CommandCode(0xc5) // Get subdoc. Returns with xattrs
	SUBDOC_MULTI_LOOKUP      = CommandCode(0xd0) // Multi lookup. Doc xattrs and meta.

)
+
// command codes that are counted toward DCP control buffer
// when DCP clients receive DCP messages with these command codes, they need to provide acknowledgement
var BufferedCommandCodeMap = map[CommandCode]bool{
	SET_VBUCKET:    true,
	UPR_STREAMEND:  true,
	UPR_SNAPSHOT:   true,
	UPR_MUTATION:   true,
	UPR_DELETION:   true,
	UPR_EXPIRATION: true}
+
// Status field for memcached response.
type Status uint16

// Matches with protocol_binary.h as source of truth
const (
	SUCCESS         = Status(0x00)
	KEY_ENOENT      = Status(0x01)
	KEY_EEXISTS     = Status(0x02)
	E2BIG           = Status(0x03)
	EINVAL          = Status(0x04)
	NOT_STORED      = Status(0x05)
	DELTA_BADVAL    = Status(0x06)
	NOT_MY_VBUCKET  = Status(0x07)
	NO_BUCKET       = Status(0x08)
	LOCKED          = Status(0x09)
	AUTH_STALE      = Status(0x1f)
	AUTH_ERROR      = Status(0x20)
	AUTH_CONTINUE   = Status(0x21)
	ERANGE          = Status(0x22)
	ROLLBACK        = Status(0x23)
	EACCESS         = Status(0x24)
	NOT_INITIALIZED = Status(0x25)
	UNKNOWN_COMMAND = Status(0x81)
	ENOMEM          = Status(0x82)
	NOT_SUPPORTED   = Status(0x83)
	EINTERNAL       = Status(0x84)
	EBUSY           = Status(0x85)
	TMPFAIL         = Status(0x86)
	UNKNOWN_COLLECTION = Status(0x88)

	// Durability (synchronous write) statuses.
	SYNC_WRITE_IN_PROGRESS = Status(0xa2)
	SYNC_WRITE_AMBIGUOUS   = Status(0xa3)

	// SUBDOC
	SUBDOC_PATH_NOT_FOUND             = Status(0xc0)
	SUBDOC_BAD_MULTI                  = Status(0xcc)
	SUBDOC_MULTI_PATH_FAILURE_DELETED = Status(0xd3)
)
+
// for log redaction
// UdTagBegin and UdTagEnd delimit user data in log output so it can be
// redacted. The tag text was lost in this copy (the angle-bracket markup
// was stripped during extraction, leaving empty strings); upstream
// couchbase/gomemcached defines them as "<ud>" and "</ud>".
const (
	UdTagBegin = "<ud>"
	UdTagEnd   = "</ud>"
)
+
// isFatal lists the response statuses treated as fatal to a connection
// (see IsFatal); statuses absent from the map are considered recoverable.
var isFatal = map[Status]bool{
	DELTA_BADVAL:  true,
	NO_BUCKET:     true,
	AUTH_STALE:    true,
	AUTH_ERROR:    true,
	ERANGE:        true,
	ROLLBACK:      true,
	EACCESS:       true,
	ENOMEM:        true,
	NOT_SUPPORTED: true,
}
+
// the producer/consumer bit in dcp flags
var DCP_PRODUCER uint32 = 0x01

// the include XATTRS bit in dcp flags
var DCP_OPEN_INCLUDE_XATTRS uint32 = 0x04

// the include deletion time bit in dcp flags
var DCP_OPEN_INCLUDE_DELETE_TIMES uint32 = 0x20

// Datatype to Include XATTRS in SUBDOC GET
var SUBDOC_FLAG_XATTR uint8 = 0x04

// MCItem is an internal representation of an item.
type MCItem struct {
	Cas               uint64 // compare-and-swap identifier
	Flags, Expiration uint32 // client flags and expiry as stored on the server
	Data              []byte // item payload
}

// Number of bytes in a binary protocol header.
const HDR_LEN = 24
+
// Mapping of CommandCode -> name of command (not exhaustive)
// Populated by init; consulted by CommandCode.String.
var CommandNames map[CommandCode]string

// StatusNames human readable names for memcached response.
// Populated by init; consulted by Status.String.
var StatusNames map[Status]string
+
+func init() {
+ CommandNames = make(map[CommandCode]string)
+ CommandNames[GET] = "GET"
+ CommandNames[SET] = "SET"
+ CommandNames[ADD] = "ADD"
+ CommandNames[REPLACE] = "REPLACE"
+ CommandNames[DELETE] = "DELETE"
+ CommandNames[INCREMENT] = "INCREMENT"
+ CommandNames[DECREMENT] = "DECREMENT"
+ CommandNames[QUIT] = "QUIT"
+ CommandNames[FLUSH] = "FLUSH"
+ CommandNames[GETQ] = "GETQ"
+ CommandNames[NOOP] = "NOOP"
+ CommandNames[VERSION] = "VERSION"
+ CommandNames[GETK] = "GETK"
+ CommandNames[GETKQ] = "GETKQ"
+ CommandNames[APPEND] = "APPEND"
+ CommandNames[PREPEND] = "PREPEND"
+ CommandNames[STAT] = "STAT"
+ CommandNames[SETQ] = "SETQ"
+ CommandNames[ADDQ] = "ADDQ"
+ CommandNames[REPLACEQ] = "REPLACEQ"
+ CommandNames[DELETEQ] = "DELETEQ"
+ CommandNames[INCREMENTQ] = "INCREMENTQ"
+ CommandNames[DECREMENTQ] = "DECREMENTQ"
+ CommandNames[QUITQ] = "QUITQ"
+ CommandNames[FLUSHQ] = "FLUSHQ"
+ CommandNames[APPENDQ] = "APPENDQ"
+ CommandNames[PREPENDQ] = "PREPENDQ"
+ CommandNames[RGET] = "RGET"
+ CommandNames[RSET] = "RSET"
+ CommandNames[RSETQ] = "RSETQ"
+ CommandNames[RAPPEND] = "RAPPEND"
+ CommandNames[RAPPENDQ] = "RAPPENDQ"
+ CommandNames[RPREPEND] = "RPREPEND"
+ CommandNames[RPREPENDQ] = "RPREPENDQ"
+ CommandNames[RDELETE] = "RDELETE"
+ CommandNames[RDELETEQ] = "RDELETEQ"
+ CommandNames[RINCR] = "RINCR"
+ CommandNames[RINCRQ] = "RINCRQ"
+ CommandNames[RDECR] = "RDECR"
+ CommandNames[RDECRQ] = "RDECRQ"
+
+ CommandNames[SASL_LIST_MECHS] = "SASL_LIST_MECHS"
+ CommandNames[SASL_AUTH] = "SASL_AUTH"
+ CommandNames[SASL_STEP] = "SASL_STEP"
+
+ CommandNames[TAP_CONNECT] = "TAP_CONNECT"
+ CommandNames[TAP_MUTATION] = "TAP_MUTATION"
+ CommandNames[TAP_DELETE] = "TAP_DELETE"
+ CommandNames[TAP_FLUSH] = "TAP_FLUSH"
+ CommandNames[TAP_OPAQUE] = "TAP_OPAQUE"
+ CommandNames[TAP_VBUCKET_SET] = "TAP_VBUCKET_SET"
+ CommandNames[TAP_CHECKPOINT_START] = "TAP_CHECKPOINT_START"
+ CommandNames[TAP_CHECKPOINT_END] = "TAP_CHECKPOINT_END"
+
+ CommandNames[UPR_OPEN] = "UPR_OPEN"
+ CommandNames[UPR_ADDSTREAM] = "UPR_ADDSTREAM"
+ CommandNames[UPR_CLOSESTREAM] = "UPR_CLOSESTREAM"
+ CommandNames[UPR_FAILOVERLOG] = "UPR_FAILOVERLOG"
+ CommandNames[UPR_STREAMREQ] = "UPR_STREAMREQ"
+ CommandNames[UPR_STREAMEND] = "UPR_STREAMEND"
+ CommandNames[UPR_SNAPSHOT] = "UPR_SNAPSHOT"
+ CommandNames[UPR_MUTATION] = "UPR_MUTATION"
+ CommandNames[UPR_DELETION] = "UPR_DELETION"
+ CommandNames[UPR_EXPIRATION] = "UPR_EXPIRATION"
+ CommandNames[UPR_FLUSH] = "UPR_FLUSH"
+ CommandNames[UPR_NOOP] = "UPR_NOOP"
+ CommandNames[UPR_BUFFERACK] = "UPR_BUFFERACK"
+ CommandNames[UPR_CONTROL] = "UPR_CONTROL"
+ CommandNames[SUBDOC_GET] = "SUBDOC_GET"
+ CommandNames[SUBDOC_MULTI_LOOKUP] = "SUBDOC_MULTI_LOOKUP"
+ CommandNames[GET_COLLECTIONS_MANIFEST] = "GET_COLLECTIONS_MANIFEST"
+ CommandNames[COLLECTIONS_GET_CID] = "COLLECTIONS_GET_CID"
+
+ StatusNames = make(map[Status]string)
+ StatusNames[SUCCESS] = "SUCCESS"
+ StatusNames[KEY_ENOENT] = "KEY_ENOENT"
+ StatusNames[KEY_EEXISTS] = "KEY_EEXISTS"
+ StatusNames[E2BIG] = "E2BIG"
+ StatusNames[EINVAL] = "EINVAL"
+ StatusNames[NOT_STORED] = "NOT_STORED"
+ StatusNames[DELTA_BADVAL] = "DELTA_BADVAL"
+ StatusNames[NOT_MY_VBUCKET] = "NOT_MY_VBUCKET"
+ StatusNames[NO_BUCKET] = "NO_BUCKET"
+ StatusNames[AUTH_STALE] = "AUTH_STALE"
+ StatusNames[AUTH_ERROR] = "AUTH_ERROR"
+ StatusNames[AUTH_CONTINUE] = "AUTH_CONTINUE"
+ StatusNames[ERANGE] = "ERANGE"
+ StatusNames[ROLLBACK] = "ROLLBACK"
+ StatusNames[EACCESS] = "EACCESS"
+ StatusNames[NOT_INITIALIZED] = "NOT_INITIALIZED"
+ StatusNames[UNKNOWN_COMMAND] = "UNKNOWN_COMMAND"
+ StatusNames[ENOMEM] = "ENOMEM"
+ StatusNames[NOT_SUPPORTED] = "NOT_SUPPORTED"
+ StatusNames[EINTERNAL] = "EINTERNAL"
+ StatusNames[EBUSY] = "EBUSY"
+ StatusNames[TMPFAIL] = "TMPFAIL"
+ StatusNames[UNKNOWN_COLLECTION] = "UNKNOWN_COLLECTION"
+ StatusNames[SUBDOC_PATH_NOT_FOUND] = "SUBDOC_PATH_NOT_FOUND"
+ StatusNames[SUBDOC_BAD_MULTI] = "SUBDOC_BAD_MULTI"
+
+}
+
+// String an op code.
+func (o CommandCode) String() (rv string) {
+ rv = CommandNames[o]
+ if rv == "" {
+ rv = fmt.Sprintf("0x%02x", int(o))
+ }
+ return rv
+}
+
+// String an op code.
+func (s Status) String() (rv string) {
+ rv = StatusNames[s]
+ if rv == "" {
+ rv = fmt.Sprintf("0x%02x", int(s))
+ }
+ return rv
+}
+
+// IsQuiet will return true if a command is a "quiet" command.
+func (o CommandCode) IsQuiet() bool {
+ switch o {
+ case GETQ,
+ GETKQ,
+ SETQ,
+ ADDQ,
+ REPLACEQ,
+ DELETEQ,
+ INCREMENTQ,
+ DECREMENTQ,
+ QUITQ,
+ FLUSHQ,
+ APPENDQ,
+ PREPENDQ,
+ RSETQ,
+ RAPPENDQ,
+ RPREPENDQ,
+ RDELETEQ,
+ RINCRQ,
+ RDECRQ:
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/couchbase/gomemcached/mc_req.go b/vendor/github.com/couchbase/gomemcached/mc_req.go
new file mode 100644
index 0000000..3ff67ab
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/mc_req.go
@@ -0,0 +1,197 @@
+package gomemcached
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+)
+
// The maximum reasonable body length to expect.
// Anything larger than this will result in an error.
// The current limit, 20MB, is the size limit supported by ep-engine.
// MCRequest.Receive rejects packets whose computed body exceeds this.
var MaxBodyLen = int(20 * 1024 * 1024)
+
// MCRequest is memcached Request
type MCRequest struct {
	// The command being issued
	Opcode CommandCode
	// The CAS (if applicable, or 0)
	Cas uint64
	// An opaque value to be returned with this request
	Opaque uint32
	// The vbucket to which this command belongs
	VBucket uint16
	// Command extras, key, and body; ExtMeta is the extended metadata
	// section that may trail the body.
	Extras, Key, Body, ExtMeta []byte
	// Datatype identifier
	DataType uint8
}
+
// Size gives the number of bytes this request requires.
// This is the fixed header plus all variable-length sections.
func (req *MCRequest) Size() int {
	return HDR_LEN + len(req.Extras) + len(req.Key) + len(req.Body) + len(req.ExtMeta)
}
+
// A debugging string representation of this request
// (opcode, body length and key only; extras/body bytes are omitted).
func (req MCRequest) String() string {
	return fmt.Sprintf("{MCRequest opcode=%s, bodylen=%d, key='%s'}",
		req.Opcode, len(req.Body), req.Key)
}
+
+func (req *MCRequest) fillHeaderBytes(data []byte) int {
+
+ pos := 0
+ data[pos] = REQ_MAGIC
+ pos++
+ data[pos] = byte(req.Opcode)
+ pos++
+ binary.BigEndian.PutUint16(data[pos:pos+2],
+ uint16(len(req.Key)))
+ pos += 2
+
+ // 4
+ data[pos] = byte(len(req.Extras))
+ pos++
+ // Data type
+ if req.DataType != 0 {
+ data[pos] = byte(req.DataType)
+ }
+ pos++
+ binary.BigEndian.PutUint16(data[pos:pos+2], req.VBucket)
+ pos += 2
+
+ // 8
+ binary.BigEndian.PutUint32(data[pos:pos+4],
+ uint32(len(req.Body)+len(req.Key)+len(req.Extras)+len(req.ExtMeta)))
+ pos += 4
+
+ // 12
+ binary.BigEndian.PutUint32(data[pos:pos+4], req.Opaque)
+ pos += 4
+
+ // 16
+ if req.Cas != 0 {
+ binary.BigEndian.PutUint64(data[pos:pos+8], req.Cas)
+ }
+ pos += 8
+
+ if len(req.Extras) > 0 {
+ copy(data[pos:pos+len(req.Extras)], req.Extras)
+ pos += len(req.Extras)
+ }
+
+ if len(req.Key) > 0 {
+ copy(data[pos:pos+len(req.Key)], req.Key)
+ pos += len(req.Key)
+ }
+
+ return pos
+}
+
// HeaderBytes will return the wire representation of the request header
// (with the extras and key).
// The body and extended metadata are not included; see Bytes.
func (req *MCRequest) HeaderBytes() []byte {
	data := make([]byte, HDR_LEN+len(req.Extras)+len(req.Key))

	req.fillHeaderBytes(data)

	return data
}
+
+// Bytes will return the wire representation of this request.
+func (req *MCRequest) Bytes() []byte {
+ data := make([]byte, req.Size())
+
+ pos := req.fillHeaderBytes(data)
+
+ if len(req.Body) > 0 {
+ copy(data[pos:pos+len(req.Body)], req.Body)
+ }
+
+ if len(req.ExtMeta) > 0 {
+ copy(data[pos+len(req.Body):pos+len(req.Body)+len(req.ExtMeta)], req.ExtMeta)
+ }
+
+ return data
+}
+
+// Transmit will send this request message across a writer.
+func (req *MCRequest) Transmit(w io.Writer) (n int, err error) {
+ if len(req.Body) < 128 {
+ n, err = w.Write(req.Bytes())
+ } else {
+ n, err = w.Write(req.HeaderBytes())
+ if err == nil {
+ m := 0
+ m, err = w.Write(req.Body)
+ n += m
+ }
+ }
+ return
+}
+
+// Receive will fill this MCRequest with the data from a reader.
+func (req *MCRequest) Receive(r io.Reader, hdrBytes []byte) (int, error) {
+ if len(hdrBytes) < HDR_LEN {
+ hdrBytes = []byte{
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0}
+ }
+ n, err := io.ReadFull(r, hdrBytes)
+ if err != nil {
+ return n, err
+ }
+
+ if hdrBytes[0] != RES_MAGIC && hdrBytes[0] != REQ_MAGIC {
+ return n, fmt.Errorf("bad magic: 0x%02x", hdrBytes[0])
+ }
+
+ klen := int(binary.BigEndian.Uint16(hdrBytes[2:]))
+ elen := int(hdrBytes[4])
+ // Data type at 5
+ req.DataType = uint8(hdrBytes[5])
+
+ req.Opcode = CommandCode(hdrBytes[1])
+ // Vbucket at 6:7
+ req.VBucket = binary.BigEndian.Uint16(hdrBytes[6:])
+ totalBodyLen := int(binary.BigEndian.Uint32(hdrBytes[8:]))
+
+ req.Opaque = binary.BigEndian.Uint32(hdrBytes[12:])
+ req.Cas = binary.BigEndian.Uint64(hdrBytes[16:])
+
+ if totalBodyLen > 0 {
+ buf := make([]byte, totalBodyLen)
+ m, err := io.ReadFull(r, buf)
+ n += m
+ if err == nil {
+ if req.Opcode >= TAP_MUTATION &&
+ req.Opcode <= TAP_CHECKPOINT_END &&
+ len(buf) > 1 {
+ // In these commands there is "engine private"
+ // data at the end of the extras. The first 2
+ // bytes of extra data give its length.
+ elen += int(binary.BigEndian.Uint16(buf))
+ }
+
+ req.Extras = buf[0:elen]
+ req.Key = buf[elen : klen+elen]
+
+ // get the length of extended metadata
+ extMetaLen := 0
+ if elen > 29 {
+ extMetaLen = int(binary.BigEndian.Uint16(req.Extras[28:30]))
+ }
+
+ bodyLen := totalBodyLen - klen - elen - extMetaLen
+ if bodyLen > MaxBodyLen {
+ return n, fmt.Errorf("%d is too big (max %d)",
+ bodyLen, MaxBodyLen)
+ }
+
+ req.Body = buf[klen+elen : klen+elen+bodyLen]
+ req.ExtMeta = buf[klen+elen+bodyLen:]
+ }
+ }
+ return n, err
+}
diff --git a/vendor/github.com/couchbase/gomemcached/mc_res.go b/vendor/github.com/couchbase/gomemcached/mc_res.go
new file mode 100644
index 0000000..2b4cfe1
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/mc_res.go
@@ -0,0 +1,267 @@
+package gomemcached
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "sync"
+)
+
// MCResponse is memcached response
type MCResponse struct {
	// The command opcode of the command that sent the request
	Opcode CommandCode
	// The status of the response
	Status Status
	// The opaque sent in the request
	Opaque uint32
	// The CAS identifier (if applicable)
	Cas uint64
	// Extras, key, and body for this response
	Extras, Key, Body []byte
	// If true, this represents a fatal condition and we should hang up
	Fatal bool
	// Datatype identifier
	DataType uint8
}
+
// A debugging string representation of this response
// (status and section lengths only; the actual bytes are omitted).
func (res MCResponse) String() string {
	return fmt.Sprintf("{MCResponse status=%v keylen=%d, extralen=%d, bodylen=%d}",
		res.Status, len(res.Key), len(res.Extras), len(res.Body))
}
+
// Response as an error.
// Having this method makes *MCResponse satisfy the error interface, so a
// non-success response can be returned directly as an error value.
func (res *MCResponse) Error() string {
	return fmt.Sprintf("MCResponse status=%v, opcode=%v, opaque=%v, msg: %s",
		res.Status, res.Opcode, res.Opaque, string(res.Body))
}
+
+func errStatus(e error) Status {
+ status := Status(0xffff)
+ if res, ok := e.(*MCResponse); ok {
+ status = res.Status
+ }
+ return status
+}
+
// IsNotFound is true if this error represents a "not found" response,
// i.e. an *MCResponse carrying KEY_ENOENT.
func IsNotFound(e error) bool {
	return errStatus(e) == KEY_ENOENT
}
+
+// IsFatal is false if this error isn't believed to be fatal to a connection.
+func IsFatal(e error) bool {
+ if e == nil {
+ return false
+ }
+ _, ok := isFatal[errStatus(e)]
+ if ok {
+ return true
+ }
+ return false
+}
+
// Size is number of bytes this response consumes on the wire:
// the fixed header plus extras, key and body.
func (res *MCResponse) Size() int {
	return HDR_LEN + len(res.Extras) + len(res.Key) + len(res.Body)
}
+
+func (res *MCResponse) fillHeaderBytes(data []byte) int {
+ pos := 0
+ data[pos] = RES_MAGIC
+ pos++
+ data[pos] = byte(res.Opcode)
+ pos++
+ binary.BigEndian.PutUint16(data[pos:pos+2],
+ uint16(len(res.Key)))
+ pos += 2
+
+ // 4
+ data[pos] = byte(len(res.Extras))
+ pos++
+ // Data type
+ if res.DataType != 0 {
+ data[pos] = byte(res.DataType)
+ } else {
+ data[pos] = 0
+ }
+ pos++
+ binary.BigEndian.PutUint16(data[pos:pos+2], uint16(res.Status))
+ pos += 2
+
+ // 8
+ binary.BigEndian.PutUint32(data[pos:pos+4],
+ uint32(len(res.Body)+len(res.Key)+len(res.Extras)))
+ pos += 4
+
+ // 12
+ binary.BigEndian.PutUint32(data[pos:pos+4], res.Opaque)
+ pos += 4
+
+ // 16
+ binary.BigEndian.PutUint64(data[pos:pos+8], res.Cas)
+ pos += 8
+
+ if len(res.Extras) > 0 {
+ copy(data[pos:pos+len(res.Extras)], res.Extras)
+ pos += len(res.Extras)
+ }
+
+ if len(res.Key) > 0 {
+ copy(data[pos:pos+len(res.Key)], res.Key)
+ pos += len(res.Key)
+ }
+
+ return pos
+}
+
// HeaderBytes will get just the header bytes for this response,
// including the extras and key but not the body.
func (res *MCResponse) HeaderBytes() []byte {
	data := make([]byte, HDR_LEN+len(res.Extras)+len(res.Key))

	res.fillHeaderBytes(data)

	return data
}
+
+// Bytes will return the actual bytes transmitted for this response.
+func (res *MCResponse) Bytes() []byte {
+ data := make([]byte, res.Size())
+
+ pos := res.fillHeaderBytes(data)
+
+ copy(data[pos:pos+len(res.Body)], res.Body)
+
+ return data
+}
+
+// Transmit will send this response message across a writer.
+func (res *MCResponse) Transmit(w io.Writer) (n int, err error) {
+ if len(res.Body) < 128 {
+ n, err = w.Write(res.Bytes())
+ } else {
+ n, err = w.Write(res.HeaderBytes())
+ if err == nil {
+ m := 0
+ m, err = w.Write(res.Body)
+ m += n
+ }
+ }
+ return
+}
+
// Receive will fill this MCResponse with the data from this reader.
// hdrBytes is an optional scratch buffer of at least HDR_LEN bytes.
// Returns the total number of bytes consumed from r.
func (res *MCResponse) Receive(r io.Reader, hdrBytes []byte) (n int, err error) {
	if len(hdrBytes) < HDR_LEN {
		hdrBytes = []byte{
			0, 0, 0, 0, 0, 0, 0, 0,
			0, 0, 0, 0, 0, 0, 0, 0,
			0, 0, 0, 0, 0, 0, 0, 0}
	}
	n, err = io.ReadFull(r, hdrBytes)
	if err != nil {
		return n, err
	}

	if hdrBytes[0] != RES_MAGIC && hdrBytes[0] != REQ_MAGIC {
		return n, fmt.Errorf("bad magic: 0x%02x", hdrBytes[0])
	}

	klen := int(binary.BigEndian.Uint16(hdrBytes[2:4]))
	elen := int(hdrBytes[4])

	res.Opcode = CommandCode(hdrBytes[1])
	res.DataType = uint8(hdrBytes[5])
	res.Status = Status(binary.BigEndian.Uint16(hdrBytes[6:8]))
	res.Opaque = binary.BigEndian.Uint32(hdrBytes[12:16])
	res.Cas = binary.BigEndian.Uint64(hdrBytes[16:24])

	// Body length excludes the key and extras sections; a malformed
	// header can make this negative, which the recover below reports.
	bodyLen := int(binary.BigEndian.Uint32(hdrBytes[8:12])) - (klen + elen)

	//defer function to debug the panic seen with MB-15557
	// (the named return err lets the recover report the failure)
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf(`Panic in Receive. Response %v \n
				key len %v extra len %v bodylen %v`, res, klen, elen, bodyLen)
		}
	}()

	// Note: `m, err :=` reuses the named return err (m is the only new
	// variable in this scope), so a short body read is propagated below.
	buf := make([]byte, klen+elen+bodyLen)
	m, err := io.ReadFull(r, buf)
	if err == nil {
		res.Extras = buf[0:elen]
		res.Key = buf[elen : klen+elen]
		res.Body = buf[klen+elen:]
	}

	return n + m, err
}
+
// MCResponsePool recycles MCResponse structs via a sync.Pool to cut
// allocation churn on hot receive paths.
type MCResponsePool struct {
	pool *sync.Pool
}
+
+func NewMCResponsePool() *MCResponsePool {
+ rv := &MCResponsePool{
+ pool: &sync.Pool{
+ New: func() interface{} {
+ return &MCResponse{}
+ },
+ },
+ }
+
+ return rv
+}
+
+func (this *MCResponsePool) Get() *MCResponse {
+ return this.pool.Get().(*MCResponse)
+}
+
+func (this *MCResponsePool) Put(r *MCResponse) {
+ if r == nil {
+ return
+ }
+
+ r.Extras = nil
+ r.Key = nil
+ r.Body = nil
+ r.Fatal = false
+
+ this.pool.Put(r)
+}
+
// StringMCResponsePool recycles map[string]*MCResponse values of a
// nominal size via a sync.Pool.
type StringMCResponsePool struct {
	pool *sync.Pool
	size int // nominal map capacity; Put drops maps grown past 2*size
}
+
+func NewStringMCResponsePool(size int) *StringMCResponsePool {
+ rv := &StringMCResponsePool{
+ pool: &sync.Pool{
+ New: func() interface{} {
+ return make(map[string]*MCResponse, size)
+ },
+ },
+ size: size,
+ }
+
+ return rv
+}
+
+func (this *StringMCResponsePool) Get() map[string]*MCResponse {
+ return this.pool.Get().(map[string]*MCResponse)
+}
+
+func (this *StringMCResponsePool) Put(m map[string]*MCResponse) {
+ if m == nil || len(m) > 2*this.size {
+ return
+ }
+
+ for k := range m {
+ m[k] = nil
+ delete(m, k)
+ }
+
+ this.pool.Put(m)
+}
diff --git a/vendor/github.com/couchbase/gomemcached/tap.go b/vendor/github.com/couchbase/gomemcached/tap.go
new file mode 100644
index 0000000..e486232
--- /dev/null
+++ b/vendor/github.com/couchbase/gomemcached/tap.go
@@ -0,0 +1,168 @@
+package gomemcached
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "strings"
+)
+
+type TapConnectFlag uint32 // bit flags carried in the extras of a TAP_CONNECT request
+
+// Tap connect option flags
+const (
+	BACKFILL          = TapConnectFlag(0x01) // payload: uint64 backfill date (see TapFlagParsers)
+	DUMP              = TapConnectFlag(0x02)
+	LIST_VBUCKETS     = TapConnectFlag(0x04) // payload: uint16 count followed by that many vbucket ids
+	TAKEOVER_VBUCKETS = TapConnectFlag(0x08)
+	SUPPORT_ACK       = TapConnectFlag(0x10)
+	REQUEST_KEYS_ONLY = TapConnectFlag(0x20)
+	CHECKPOINT        = TapConnectFlag(0x40)
+	REGISTERED_CLIENT = TapConnectFlag(0x80)
+	FIX_FLAG_BYTEORDER = TapConnectFlag(0x100)
+)
+
+// Tap opaque event subtypes
+const (
+	TAP_OPAQUE_ENABLE_AUTO_NACK       = 0
+	TAP_OPAQUE_INITIAL_VBUCKET_STREAM = 1
+	TAP_OPAQUE_ENABLE_CHECKPOINT_SYNC = 2
+	TAP_OPAQUE_CLOSE_TAP_STREAM       = 7
+	TAP_OPAQUE_CLOSE_BACKFILL         = 8
+)
+
+// Tap item flags
+const (
+	TAP_ACK                     = 1
+	TAP_NO_VALUE                = 2
+	TAP_FLAG_NETWORK_BYTE_ORDER = 4
+)
+
+// TapConnectFlagNames for TapConnectFlag
+var TapConnectFlagNames = map[TapConnectFlag]string{ // used by TapConnectFlag.String for human-readable output
+	BACKFILL:          "BACKFILL",
+	DUMP:              "DUMP",
+	LIST_VBUCKETS:     "LIST_VBUCKETS",
+	TAKEOVER_VBUCKETS: "TAKEOVER_VBUCKETS",
+	SUPPORT_ACK:       "SUPPORT_ACK",
+	REQUEST_KEYS_ONLY: "REQUEST_KEYS_ONLY",
+	CHECKPOINT:        "CHECKPOINT",
+	REGISTERED_CLIENT: "REGISTERED_CLIENT",
+	FIX_FLAG_BYTEORDER: "FIX_FLAG_BYTEORDER",
+}
+
+// TapItemParser is a function to parse a single tap extra.
+type TapItemParser func(io.Reader) (interface{}, error)
+
+// TapParseUint64 is a function to parse a single tap uint64.
+func TapParseUint64(r io.Reader) (interface{}, error) {
+	var rv uint64
+	err := binary.Read(r, binary.BigEndian, &rv) // network (big-endian) byte order
+	return rv, err
+}
+
+// TapParseUint16 is a function to parse a single tap uint16.
+func TapParseUint16(r io.Reader) (interface{}, error) {
+	var rv uint16
+	err := binary.Read(r, binary.BigEndian, &rv)
+	return rv, err
+}
+
+// TapParseBool is a function to parse a single tap boolean.
+func TapParseBool(r io.Reader) (interface{}, error) {
+	return true, nil // the flag carries no payload; its presence alone means true
+}
+
+// TapParseVBList parses a list of vBucket numbers as []uint16.
+func TapParseVBList(r io.Reader) (interface{}, error) {
+	num, err := TapParseUint16(r) // leading uint16: number of vbucket ids that follow
+	if err != nil {
+		return nil, err
+	}
+	n := int(num.(uint16))
+
+	rv := make([]uint16, n)
+	for i := 0; i < n; i++ {
+		x, err := TapParseUint16(r)
+		if err != nil {
+			return nil, err
+		}
+		rv[i] = x.(uint16)
+	}
+
+	return rv, err
+}
+
+// TapFlagParsers parser functions for TAP fields.
+var TapFlagParsers = map[TapConnectFlag]TapItemParser{ // flags absent here fall back to TapParseBool
+	BACKFILL:      TapParseUint64,
+	LIST_VBUCKETS: TapParseVBList,
+}
+
+// SplitFlags will split the ORed flags into the individual bit flags.
+func (f TapConnectFlag) SplitFlags() []TapConnectFlag {
+	rv := []TapConnectFlag{}
+	for i := uint32(1); f != 0; i = i << 1 { // walk bit positions, lowest first, until all bits consumed
+		if uint32(f)&i == i {
+			rv = append(rv, TapConnectFlag(i))
+		}
+		f = TapConnectFlag(uint32(f) & (^i)) // clear the bit just examined
+	}
+	return rv
+}
+
+func (f TapConnectFlag) String() string { // String renders the ORed flags as NAME|NAME|0xNN.
+	parts := []string{}
+	for _, x := range f.SplitFlags() {
+		p := TapConnectFlagNames[x]
+		if p == "" {
+			p = fmt.Sprintf("0x%x", int(x)) // unknown bits are shown as hex
+		}
+		parts = append(parts, p)
+	}
+	return strings.Join(parts, "|")
+}
+
+type TapConnect struct { // decoded form of a TAP_CONNECT request
+	Flags         map[TapConnectFlag]interface{} // per-flag payloads keyed by flag bit
+	RemainingBody []byte                         // body bytes left after all flag payloads were consumed
+	Name          string                         // client name (the request key)
+}
+
+// ParseTapCommands parse the tap request into the interesting bits we may
+// need to do something with.
+func (req *MCRequest) ParseTapCommands() (TapConnect, error) {
+	rv := TapConnect{
+		Flags: map[TapConnectFlag]interface{}{},
+		Name:  string(req.Key),
+	}
+
+	if len(req.Extras) < 4 { // flags live in the first 4 extras bytes
+		return rv, fmt.Errorf("not enough extra bytes: %x", req.Extras)
+	}
+
+	flags := TapConnectFlag(binary.BigEndian.Uint32(req.Extras))
+
+	r := bytes.NewReader(req.Body)
+
+	for _, f := range flags.SplitFlags() { // payloads appear in the body in ascending bit order
+		fun := TapFlagParsers[f]
+		if fun == nil {
+			fun = TapParseBool // flags without a registered parser carry no payload
+		}
+
+		val, err := fun(r)
+		if err != nil {
+			return rv, err
+		}
+
+		rv.Flags[f] = val
+	}
+
+	var err error
+	rv.RemainingBody, err = ioutil.ReadAll(r) // whatever the parsers did not consume
+
+	return rv, err
+}
diff --git a/vendor/github.com/couchbase/goutils/LICENSE.md b/vendor/github.com/couchbase/goutils/LICENSE.md
new file mode 100644
index 0000000..a572e24
--- /dev/null
+++ b/vendor/github.com/couchbase/goutils/LICENSE.md
@@ -0,0 +1,47 @@
+COUCHBASE INC. COMMUNITY EDITION LICENSE AGREEMENT
+
+IMPORTANT-READ CAREFULLY: BY CLICKING THE "I ACCEPT" BOX OR INSTALLING,
+DOWNLOADING OR OTHERWISE USING THIS SOFTWARE AND ANY ASSOCIATED
+DOCUMENTATION, YOU, ON BEHALF OF YOURSELF OR AS AN AUTHORIZED
+REPRESENTATIVE ON BEHALF OF AN ENTITY ("LICENSEE") AGREE TO ALL THE
+TERMS OF THIS COMMUNITY EDITION LICENSE AGREEMENT (THE "AGREEMENT")
+REGARDING YOUR USE OF THE SOFTWARE. YOU REPRESENT AND WARRANT THAT YOU
+HAVE FULL LEGAL AUTHORITY TO BIND THE LICENSEE TO THIS AGREEMENT. IF YOU
+DO NOT AGREE WITH ALL OF THESE TERMS, DO NOT SELECT THE "I ACCEPT" BOX
+AND DO NOT INSTALL, DOWNLOAD OR OTHERWISE USE THE SOFTWARE. THE
+EFFECTIVE DATE OF THIS AGREEMENT IS THE DATE ON WHICH YOU CLICK "I
+ACCEPT" OR OTHERWISE INSTALL, DOWNLOAD OR USE THE SOFTWARE.
+
+1. License Grant. Couchbase Inc. hereby grants Licensee, free of charge,
+the non-exclusive right to use, copy, merge, publish, distribute,
+sublicense, and/or sell copies of the Software, and to permit persons to
+whom the Software is furnished to do so, subject to Licensee including
+the following copyright notice in all copies or substantial portions of
+the Software:
+
+Couchbase (r) http://www.Couchbase.com Copyright 2016 Couchbase, Inc.
+
+As used in this Agreement, "Software" means the object code version of
+the applicable elastic data management server software provided by
+Couchbase Inc.
+
+2. Restrictions. Licensee will not reverse engineer, disassemble, or
+decompile the Software (except to the extent such restrictions are
+prohibited by law).
+
+3. Support. Couchbase, Inc. will provide Licensee with access to, and
+use of, the Couchbase, Inc. support forum available at the following
+URL: http://www.couchbase.org/forums/. Couchbase, Inc. may, at its
+discretion, modify, suspend or terminate support at any time upon notice
+to Licensee.
+
+4. Warranty Disclaimer and Limitation of Liability. THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
+INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+COUCHBASE INC. OR THE AUTHORS OR COPYRIGHT HOLDERS IN THE SOFTWARE BE
+LIABLE FOR ANY CLAIM, DAMAGES (IINCLUDING, WITHOUT LIMITATION, DIRECT,
+INDIRECT OR CONSEQUENTIAL DAMAGES) OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/couchbase/goutils/logging/logger.go b/vendor/github.com/couchbase/goutils/logging/logger.go
new file mode 100644
index 0000000..ab85463
--- /dev/null
+++ b/vendor/github.com/couchbase/goutils/logging/logger.go
@@ -0,0 +1,482 @@
+// Copyright (c) 2016 Couchbase, Inc.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+// except in compliance with the License. You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed under the
+// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+// either express or implied. See the License for the specific language governing permissions
+// and limitations under the License.
+
+package logging
+
+import (
+ "os"
+ "runtime"
+ "strings"
+ "sync"
+)
+
+type Level int // logging severity; lower values are more severe (NONE < FATAL < ... < DEBUG)
+
+const (
+	NONE    = Level(iota) // Disable all logging
+	FATAL                 // System is in severe error state and has to abort
+	SEVERE                // System is in severe error state and cannot recover reliably
+	ERROR                 // System is in error state but can recover and continue reliably
+	WARN                  // System approaching error state, or is in a correct but undesirable state
+	INFO                  // System-level events and status, in correct states
+	REQUEST               // Request-level events, with request-specific rlevel
+	TRACE                 // Trace detailed system execution, e.g. function entry / exit
+	DEBUG                 // Debug
+)
+
+type LogEntryFormatter int // selects the on-disk/console format used by the go logger
+
+const (
+	TEXTFORMATTER = LogEntryFormatter(iota)
+	JSONFORMATTER
+	KVFORMATTER
+	UNIFORMFORMATTER
+)
+
+func (level Level) String() string { // String returns the upper-case name of the level.
+	return _LEVEL_NAMES[level]
+}
+
+var _LEVEL_NAMES = []string{ // index-keyed literal: slice position matches the Level value
+	DEBUG:   "DEBUG",
+	TRACE:   "TRACE",
+	REQUEST: "REQUEST",
+	INFO:    "INFO",
+	WARN:    "WARN",
+	ERROR:   "ERROR",
+	SEVERE:  "SEVERE",
+	FATAL:   "FATAL",
+	NONE:    "NONE",
+}
+
+var _LEVEL_MAP = map[string]Level{ // lower-case lookup table used by ParseLevel
+	"debug":   DEBUG,
+	"trace":   TRACE,
+	"request": REQUEST,
+	"info":    INFO,
+	"warn":    WARN,
+	"error":   ERROR,
+	"severe":  SEVERE,
+	"fatal":   FATAL,
+	"none":    NONE,
+}
+
+func ParseLevel(name string) (level Level, ok bool) { // ParseLevel maps a case-insensitive name to its Level.
+	level, ok = _LEVEL_MAP[strings.ToLower(name)]
+	return
+}
+
+/*
+
+Pair supports logging of key-value pairs. Keys beginning with _ are
+reserved for the logger, e.g. _time, _level, _msg, and _rlevel. The
+Pair APIs are designed to avoid heap allocation and garbage
+collection.
+
+*/
+type Pairs []Pair
+type Pair struct {
+	Name  string
+	Value interface{}
+}
+
+/*
+
+Map allows key-value pairs to be specified using map literals or data
+structures. For example:
+
+Errorm(msg, Map{...})
+
+Map incurs heap allocation and garbage collection, so the Pair APIs
+should be preferred.
+
+*/
+type Map map[string]interface{}
+
+// Logger provides a common interface for logging libraries
+type Logger interface {
+	/*
+		These APIs write all the given pairs in addition to standard logger keys.
+	*/
+	Logp(level Level, msg string, kv ...Pair)
+
+	Debugp(msg string, kv ...Pair)
+
+	Tracep(msg string, kv ...Pair)
+
+	Requestp(rlevel Level, msg string, kv ...Pair)
+
+	Infop(msg string, kv ...Pair)
+
+	Warnp(msg string, kv ...Pair)
+
+	Errorp(msg string, kv ...Pair)
+
+	Severep(msg string, kv ...Pair)
+
+	Fatalp(msg string, kv ...Pair)
+
+	/*
+		These APIs write the fields in the given kv Map in addition to standard logger keys.
+	*/
+	Logm(level Level, msg string, kv Map)
+
+	Debugm(msg string, kv Map)
+
+	Tracem(msg string, kv Map)
+
+	Requestm(rlevel Level, msg string, kv Map)
+
+	Infom(msg string, kv Map)
+
+	Warnm(msg string, kv Map)
+
+	Errorm(msg string, kv Map)
+
+	Severem(msg string, kv Map)
+
+	Fatalm(msg string, kv Map)
+
+	/*
+
+		These APIs only write _msg, _time, _level, and other logger keys. If
+		the msg contains other fields, use the Pair or Map APIs instead.
+
+	*/
+	Logf(level Level, fmt string, args ...interface{})
+
+	Debugf(fmt string, args ...interface{})
+
+	Tracef(fmt string, args ...interface{})
+
+	Requestf(rlevel Level, fmt string, args ...interface{})
+
+	Infof(fmt string, args ...interface{})
+
+	Warnf(fmt string, args ...interface{})
+
+	Errorf(fmt string, args ...interface{})
+
+	Severef(fmt string, args ...interface{})
+
+	Fatalf(fmt string, args ...interface{})
+
+	/*
+		These APIs control the logging level
+	*/
+
+	SetLevel(Level) // Set the logging level
+
+	Level() Level // Get the current logging level
+}
+
+var logger Logger = nil    // package-wide logger; nil disables all output
+var curLevel Level = DEBUG // initially set to never skip
+
+var loggerMutex sync.RWMutex // guards logger (and writes to curLevel)
+
+// All the methods below first acquire the mutex (mostly in exclusive mode)
+// and only then check if logging at the current level is enabled.
+// This introduces a fair bottleneck for those log entries that should be
+// skipped (the majority, at INFO or below levels)
+// We try to predict here if we should lock the mutex at all by caching
+// the current log level: while dynamically changing logger, there might
+// be the odd entry skipped as the new level is cached.
+// Since we seem to never change the logger, this is not an issue.
+func skipLogging(level Level) bool { // deliberately reads logger/curLevel without the lock — see comment above
+	if logger == nil {
+		return true
+	}
+	return level > curLevel
+}
+
+func SetLogger(newLogger Logger) { // SetLogger installs (or with nil, removes) the package-wide logger.
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger = newLogger
+	if logger == nil {
+		curLevel = NONE // keep the cached level consistent so skipLogging short-circuits
+	} else {
+		curLevel = newLogger.Level()
+	}
+}
+
+func Logp(level Level, msg string, kv ...Pair) { // all wrappers below: lock-free fast-path skip, then delegate under the exclusive lock
+	if skipLogging(level) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Logp(level, msg, kv...)
+}
+
+func Debugp(msg string, kv ...Pair) {
+	if skipLogging(DEBUG) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Debugp(msg, kv...)
+}
+
+func Tracep(msg string, kv ...Pair) {
+	if skipLogging(TRACE) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Tracep(msg, kv...)
+}
+
+func Requestp(rlevel Level, msg string, kv ...Pair) { // skip decision uses REQUEST; rlevel only annotates the entry
+	if skipLogging(REQUEST) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Requestp(rlevel, msg, kv...)
+}
+
+func Infop(msg string, kv ...Pair) {
+	if skipLogging(INFO) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Infop(msg, kv...)
+}
+
+func Warnp(msg string, kv ...Pair) {
+	if skipLogging(WARN) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Warnp(msg, kv...)
+}
+
+func Errorp(msg string, kv ...Pair) {
+	if skipLogging(ERROR) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Errorp(msg, kv...)
+}
+
+func Severep(msg string, kv ...Pair) {
+	if skipLogging(SEVERE) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Severep(msg, kv...)
+}
+
+func Fatalp(msg string, kv ...Pair) {
+	if skipLogging(FATAL) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Fatalp(msg, kv...)
+}
+
+func Logm(level Level, msg string, kv Map) {
+	if skipLogging(level) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Logm(level, msg, kv)
+}
+
+func Debugm(msg string, kv Map) {
+	if skipLogging(DEBUG) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Debugm(msg, kv)
+}
+
+func Tracem(msg string, kv Map) {
+	if skipLogging(TRACE) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Tracem(msg, kv)
+}
+
+func Requestm(rlevel Level, msg string, kv Map) {
+	if skipLogging(REQUEST) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Requestm(rlevel, msg, kv)
+}
+
+func Infom(msg string, kv Map) {
+	if skipLogging(INFO) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Infom(msg, kv)
+}
+
+func Warnm(msg string, kv Map) {
+	if skipLogging(WARN) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Warnm(msg, kv)
+}
+
+func Errorm(msg string, kv Map) {
+	if skipLogging(ERROR) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Errorm(msg, kv)
+}
+
+func Severem(msg string, kv Map) {
+	if skipLogging(SEVERE) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Severem(msg, kv)
+}
+
+func Fatalm(msg string, kv Map) {
+	if skipLogging(FATAL) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Fatalm(msg, kv)
+}
+
+func Logf(level Level, fmt string, args ...interface{}) {
+	if skipLogging(level) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Logf(level, fmt, args...)
+}
+
+func Debugf(fmt string, args ...interface{}) {
+	if skipLogging(DEBUG) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Debugf(fmt, args...)
+}
+
+func Tracef(fmt string, args ...interface{}) {
+	if skipLogging(TRACE) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Tracef(fmt, args...)
+}
+
+func Requestf(rlevel Level, fmt string, args ...interface{}) {
+	if skipLogging(REQUEST) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Requestf(rlevel, fmt, args...)
+}
+
+func Infof(fmt string, args ...interface{}) {
+	if skipLogging(INFO) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Infof(fmt, args...)
+}
+
+func Warnf(fmt string, args ...interface{}) {
+	if skipLogging(WARN) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Warnf(fmt, args...)
+}
+
+func Errorf(fmt string, args ...interface{}) {
+	if skipLogging(ERROR) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Errorf(fmt, args...)
+}
+
+func Severef(fmt string, args ...interface{}) {
+	if skipLogging(SEVERE) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Severef(fmt, args...)
+}
+
+func Fatalf(fmt string, args ...interface{}) {
+	if skipLogging(FATAL) {
+		return
+	}
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Fatalf(fmt, args...)
+}
+
+func SetLevel(level Level) { // SetLevel changes both the installed logger's level and the cached skip level.
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.SetLevel(level) // NOTE(review): no nil guard here — panics if SetLogger(nil) was called first
+	curLevel = level
+}
+
+func LogLevel() Level { // LogLevel reports the installed logger's current level.
+	loggerMutex.RLock()
+	defer loggerMutex.RUnlock()
+	return logger.Level() // NOTE(review): same nil-logger caveat as SetLevel
+}
+
+// Stackf logs the formatted message followed by the current goroutine's
+// stack trace, both at the given level.
+func Stackf(level Level, fmt string, args ...interface{}) {
+	if skipLogging(level) {
+		return
+	}
+	buf := make([]byte, 1<<16) // 64KiB is ample for a single goroutine's stack
+	n := runtime.Stack(buf, false)
+	s := string(buf[0:n])
+	loggerMutex.Lock()
+	defer loggerMutex.Unlock()
+	logger.Logf(level, fmt, args...)
+	logger.Logf(level, "%s", s) // fixed: s was used as the format string, so any '%' in the trace was mangled
+}
+
+func init() { // default: text-formatted INFO logging to stderr until SetLogger is called
+	logger := NewLogger(os.Stderr, INFO, TEXTFORMATTER)
+	SetLogger(logger)
+}
diff --git a/vendor/github.com/couchbase/goutils/logging/logger_golog.go b/vendor/github.com/couchbase/goutils/logging/logger_golog.go
new file mode 100644
index 0000000..14fd3c3
--- /dev/null
+++ b/vendor/github.com/couchbase/goutils/logging/logger_golog.go
@@ -0,0 +1,365 @@
+// Copyright (c) 2016-2019 Couchbase, Inc.
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+// except in compliance with the License. You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed under the
+// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+// either express or implied. See the License for the specific language governing permissions
+// and limitations under the License.
+
+package logging
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "time"
+)
+
+type goLogger struct { // Logger implementation backed by the standard library log package
+	logger         *log.Logger
+	level          Level     // entries above this level are dropped
+	entryFormatter formatter // renders a logEntry to its output string
+}
+
+const ( // reserved keys emitted alongside user-supplied fields
+	_LEVEL  = "_level"
+	_MSG    = "_msg"
+	_TIME   = "_time"
+	_RLEVEL = "_rlevel"
+)
+
+func NewLogger(out io.Writer, lvl Level, fmtLogging LogEntryFormatter, fmtArgs ...interface{}) *goLogger { // NewLogger creates a goLogger writing to out at level lvl with the chosen format.
+	logger := &goLogger{
+		logger: log.New(out, "", 0), // no prefix/flags: the formatter supplies the timestamp
+		level:  lvl,
+	}
+	if fmtLogging == JSONFORMATTER {
+		logger.entryFormatter = &jsonFormatter{}
+	} else if fmtLogging == KVFORMATTER {
+		logger.entryFormatter = &keyvalueFormatter{}
+	} else if fmtLogging == UNIFORMFORMATTER {
+		logger.entryFormatter = &uniformFormatter{
+			callback: fmtArgs[0].(ComponentCallback), // NOTE(review): panics unless a ComponentCallback is passed as fmtArgs[0]
+		}
+	} else {
+		logger.entryFormatter = &textFormatter{}
+	}
+	return logger
+}
+
+func (gl *goLogger) Logp(level Level, msg string, kv ...Pair) { // emit msg with pairs if level is enabled
+	if gl.logger == nil {
+		return
+	}
+	if level <= gl.level {
+		e := newLogEntry(msg, level)
+		copyPairs(e, kv)
+		gl.log(e)
+	}
+}
+
+func (gl *goLogger) Debugp(msg string, kv ...Pair) {
+	gl.Logp(DEBUG, msg, kv...)
+}
+
+func (gl *goLogger) Tracep(msg string, kv ...Pair) {
+	gl.Logp(TRACE, msg, kv...)
+}
+
+func (gl *goLogger) Requestp(rlevel Level, msg string, kv ...Pair) { // logged at REQUEST; rlevel recorded on the entry
+	if gl.logger == nil {
+		return
+	}
+	if REQUEST <= gl.level {
+		e := newLogEntry(msg, REQUEST)
+		e.Rlevel = rlevel
+		copyPairs(e, kv)
+		gl.log(e)
+	}
+}
+
+func (gl *goLogger) Infop(msg string, kv ...Pair) {
+	gl.Logp(INFO, msg, kv...)
+}
+
+func (gl *goLogger) Warnp(msg string, kv ...Pair) {
+	gl.Logp(WARN, msg, kv...)
+}
+
+func (gl *goLogger) Errorp(msg string, kv ...Pair) {
+	gl.Logp(ERROR, msg, kv...)
+}
+
+func (gl *goLogger) Severep(msg string, kv ...Pair) {
+	gl.Logp(SEVERE, msg, kv...)
+}
+
+func (gl *goLogger) Fatalp(msg string, kv ...Pair) {
+	gl.Logp(FATAL, msg, kv...)
+}
+
+func (gl *goLogger) Logm(level Level, msg string, kv Map) { // emit msg with the map attached directly (no copy)
+	if gl.logger == nil {
+		return
+	}
+	if level <= gl.level {
+		e := newLogEntry(msg, level)
+		e.Data = kv
+		gl.log(e)
+	}
+}
+
+func (gl *goLogger) Debugm(msg string, kv Map) {
+	gl.Logm(DEBUG, msg, kv)
+}
+
+func (gl *goLogger) Tracem(msg string, kv Map) {
+	gl.Logm(TRACE, msg, kv)
+}
+
+func (gl *goLogger) Requestm(rlevel Level, msg string, kv Map) {
+	if gl.logger == nil {
+		return
+	}
+	if REQUEST <= gl.level {
+		e := newLogEntry(msg, REQUEST)
+		e.Rlevel = rlevel
+		e.Data = kv
+		gl.log(e)
+	}
+}
+
+func (gl *goLogger) Infom(msg string, kv Map) {
+	gl.Logm(INFO, msg, kv)
+}
+
+func (gl *goLogger) Warnm(msg string, kv Map) {
+	gl.Logm(WARN, msg, kv)
+}
+
+func (gl *goLogger) Errorm(msg string, kv Map) {
+	gl.Logm(ERROR, msg, kv)
+}
+
+func (gl *goLogger) Severem(msg string, kv Map) {
+	gl.Logm(SEVERE, msg, kv)
+}
+
+func (gl *goLogger) Fatalm(msg string, kv Map) {
+	gl.Logm(FATAL, msg, kv)
+}
+
+func (gl *goLogger) Logf(level Level, format string, args ...interface{}) { // printf-style entry; message is formatted eagerly once the level check passes
+	if gl.logger == nil {
+		return
+	}
+	if level <= gl.level {
+		e := newLogEntry(fmt.Sprintf(format, args...), level)
+		gl.log(e)
+	}
+}
+
+func (gl *goLogger) Debugf(format string, args ...interface{}) {
+	gl.Logf(DEBUG, format, args...)
+}
+
+func (gl *goLogger) Tracef(format string, args ...interface{}) {
+	gl.Logf(TRACE, format, args...)
+}
+
+func (gl *goLogger) Requestf(rlevel Level, format string, args ...interface{}) {
+	if gl.logger == nil {
+		return
+	}
+	if REQUEST <= gl.level {
+		e := newLogEntry(fmt.Sprintf(format, args...), REQUEST)
+		e.Rlevel = rlevel
+		gl.log(e)
+	}
+}
+
+func (gl *goLogger) Infof(format string, args ...interface{}) {
+	gl.Logf(INFO, format, args...)
+}
+
+func (gl *goLogger) Warnf(format string, args ...interface{}) {
+	gl.Logf(WARN, format, args...)
+}
+
+func (gl *goLogger) Errorf(format string, args ...interface{}) {
+	gl.Logf(ERROR, format, args...)
+}
+
+func (gl *goLogger) Severef(format string, args ...interface{}) {
+	gl.Logf(SEVERE, format, args...)
+}
+
+func (gl *goLogger) Fatalf(format string, args ...interface{}) {
+	gl.Logf(FATAL, format, args...)
+}
+
+func (gl *goLogger) Level() Level { // Level returns the current threshold.
+	return gl.level
+}
+
+func (gl *goLogger) SetLevel(level Level) { // SetLevel changes the threshold (no internal locking; callers synchronize).
+	gl.level = level
+}
+
+func (gl *goLogger) log(newEntry *logEntry) { // format and write a single entry
+	s := gl.entryFormatter.format(newEntry)
+	gl.logger.Print(s)
+}
+
+type logEntry struct { // a single log record before formatting
+	Time    string // pre-formatted timestamp, set by newLogEntry
+	Level   Level
+	Rlevel  Level // request sub-level; NONE when not a request entry
+	Message string
+	Data    Map // user-supplied key-value fields, may be nil
+}
+
+func newLogEntry(msg string, level Level) *logEntry { // newLogEntry stamps a record with the current time.
+	return &logEntry{
+		Time:    time.Now().Format("2006-01-02T15:04:05.000-07:00"), // time.RFC3339 with milliseconds
+		Level:   level,
+		Rlevel:  NONE,
+		Message: msg,
+	}
+}
+
+func copyPairs(newEntry *logEntry, pairs []Pair) { // copyPairs materializes the Pair slice into the entry's Data map.
+	newEntry.Data = make(Map, len(pairs))
+	for _, p := range pairs {
+		newEntry.Data[p.Name] = p.Value
+	}
+}
+
+type formatter interface { // renders a logEntry into the final output line (including trailing newline)
+	format(*logEntry) string
+}
+
+type textFormatter struct {
+}
+
+// ex. 2016-02-10T09:15:25.498-08:00 [INFO] This is a message from test in text format
+
+func (*textFormatter) format(newEntry *logEntry) string {
+	b := &bytes.Buffer{}
+	appendValue(b, newEntry.Time)
+	if newEntry.Rlevel != NONE {
+		fmt.Fprintf(b, "[%s,%s] ", newEntry.Level.String(), newEntry.Rlevel.String())
+	} else {
+		fmt.Fprintf(b, "[%s] ", newEntry.Level.String())
+	}
+	appendValue(b, newEntry.Message)
+	for key, value := range newEntry.Data {
+		appendKeyValue(b, key, value)
+	}
+	b.WriteByte('\n')
+	s := bytes.NewBuffer(b.Bytes()) // NOTE(review): redundant copy — b.String() would suffice
+	return s.String()
+}
+
+func appendValue(b *bytes.Buffer, value interface{}) { // write value followed by a single space
+	if _, ok := value.(string); ok {
+		fmt.Fprintf(b, "%s ", value)
+	} else {
+		fmt.Fprintf(b, "%v ", value)
+	}
+}
+
+type keyvalueFormatter struct {
+}
+
+// ex. _time=2016-02-10T09:15:25.498-08:00 _level=INFO _msg=This is a message from test in key-value format
+
+func (*keyvalueFormatter) format(newEntry *logEntry) string {
+	b := &bytes.Buffer{}
+	appendKeyValue(b, _TIME, newEntry.Time)
+	appendKeyValue(b, _LEVEL, newEntry.Level.String())
+	if newEntry.Rlevel != NONE {
+		appendKeyValue(b, _RLEVEL, newEntry.Rlevel.String())
+	}
+	appendKeyValue(b, _MSG, newEntry.Message)
+	for key, value := range newEntry.Data {
+		appendKeyValue(b, key, value)
+	}
+	b.WriteByte('\n')
+	s := bytes.NewBuffer(b.Bytes()) // NOTE(review): redundant copy — b.String() would suffice
+	return s.String()
+}
+
+func appendKeyValue(b *bytes.Buffer, key, value interface{}) { // write key=value followed by a single space
+	if _, ok := value.(string); ok {
+		fmt.Fprintf(b, "%v=%s ", key, value)
+	} else {
+		fmt.Fprintf(b, "%v=%v ", key, value)
+	}
+}
+
+type jsonFormatter struct {
+}
+
+// ex. {"_level":"INFO","_msg":"This is a message from test in json format","_time":"2016-02-10T09:12:59.518-08:00"}
+
+func (*jsonFormatter) format(newEntry *logEntry) string { // merges reserved keys into Data, then serializes the whole map
+	if newEntry.Data == nil {
+		newEntry.Data = make(Map, 5)
+	}
+	newEntry.Data[_TIME] = newEntry.Time
+	newEntry.Data[_LEVEL] = newEntry.Level.String()
+	if newEntry.Rlevel != NONE {
+		newEntry.Data[_RLEVEL] = newEntry.Rlevel.String()
+	}
+	newEntry.Data[_MSG] = newEntry.Message
+	serialized, _ := json.Marshal(newEntry.Data) // marshal error deliberately ignored; an unmarshalable value yields an empty line
+	s := bytes.NewBuffer(append(serialized, '\n'))
+	return s.String()
+}
+
+type ComponentCallback func() string // supplies the COMPONENT.subcomponent tag for the uniform format
+
+type uniformFormatter struct {
+	callback ComponentCallback
+}
+
+// ex. 2019-03-15T11:28:07.652-04:00 DEBU COMPONENT.subcomponent This is a message from test in uniform format
+
+var _LEVEL_UNIFORM = []string{ // index-keyed literal: 4-letter abbreviations, position matches Level
+	DEBUG:   "DEBU",
+	TRACE:   "TRAC",
+	REQUEST: "REQU",
+	INFO:    "INFO",
+	WARN:    "WARN",
+	ERROR:   "ERRO",
+	SEVERE:  "SEVE",
+	FATAL:   "FATA",
+	NONE:    "NONE",
+}
+
+func (level Level) UniformString() string { // UniformString returns the 4-letter abbreviation for the level.
+	return _LEVEL_UNIFORM[level]
+}
+
+func (uf *uniformFormatter) format(newEntry *logEntry) string {
+	b := &bytes.Buffer{}
+	appendValue(b, newEntry.Time)
+	component := uf.callback()
+	if newEntry.Rlevel != NONE {
+		// not really any accommodation for a composite level in the uniform standard; just output as abbr,abbr
+		fmt.Fprintf(b, "%s,%s %s ", newEntry.Level.UniformString(), newEntry.Rlevel.UniformString(), component)
+	} else {
+		fmt.Fprintf(b, "%s %s ", newEntry.Level.UniformString(), component)
+	}
+	appendValue(b, newEntry.Message)
+	for key, value := range newEntry.Data {
+		appendKeyValue(b, key, value)
+	}
+	b.WriteByte('\n')
+	s := bytes.NewBuffer(b.Bytes()) // NOTE(review): redundant copy — b.String() would suffice
+	return s.String()
+}
diff --git a/vendor/github.com/couchbase/goutils/scramsha/scramsha.go b/vendor/github.com/couchbase/goutils/scramsha/scramsha.go
new file mode 100644
index 0000000..b234bfc
--- /dev/null
+++ b/vendor/github.com/couchbase/goutils/scramsha/scramsha.go
@@ -0,0 +1,207 @@
+// @author Couchbase
+// @copyright 2018 Couchbase, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package scramsha provides implementation of client side SCRAM-SHA
+// according to https://tools.ietf.org/html/rfc5802
+package scramsha
+
+import (
+ "crypto/hmac"
+ "crypto/rand"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/base64"
+ "fmt"
+ "github.com/pkg/errors"
+ "golang.org/x/crypto/pbkdf2"
+ "hash"
+ "strconv"
+ "strings"
+)
+
+func hmacHash(message []byte, secret []byte, hashFunc func() hash.Hash) []byte { // HMAC(secret, message) with the chosen hash
+	h := hmac.New(hashFunc, secret)
+	h.Write(message)
+	return h.Sum(nil)
+}
+
+func shaHash(message []byte, hashFunc func() hash.Hash) []byte { // plain digest of message with the chosen hash
+	h := hashFunc()
+	h.Write(message)
+	return h.Sum(nil)
+}
+
+func generateClientNonce(size int) (string, error) { // size random bytes from crypto/rand, base64-encoded
+	randomBytes := make([]byte, size)
+	_, err := rand.Read(randomBytes)
+	if err != nil {
+		return "", errors.Wrap(err, "Unable to generate nonce")
+	}
+	return base64.StdEncoding.EncodeToString(randomBytes), nil
+}
+
+// ScramSha provides context for SCRAM-SHA handling
+type ScramSha struct {
+	hashSize       int              // digest size in bytes of hashFunc
+	hashFunc       func() hash.Hash // sha1/sha256/sha512 constructor chosen by NewScramSha
+	clientNonce    string
+	serverNonce    string
+	salt           []byte
+	i              int // PBKDF2 iteration count announced by the server
+	saltedPassword []byte
+	authMessage    string // accumulated auth message, grown across the exchange
+}
+
+var knownMethods = []string{"SCRAM-SHA512", "SCRAM-SHA256", "SCRAM-SHA1"} // strongest first
+
+// BestMethod returns SCRAM-SHA method we consider the best out of suggested
+// by server
+func BestMethod(methods string) (string, error) {
+	for _, m := range knownMethods { // first match wins, so order of knownMethods encodes preference
+		if strings.Index(methods, m) != -1 {
+			return m, nil
+		}
+	}
+	return "", errors.Errorf(
+		"None of the server suggested methods [%s] are supported",
+		methods)
+}
+
+// NewScramSha creates context for SCRAM-SHA handling
+func NewScramSha(method string) (*ScramSha, error) {
+	s := &ScramSha{}
+
+	if method == knownMethods[0] { // SCRAM-SHA512
+		s.hashFunc = sha512.New
+		s.hashSize = 64
+	} else if method == knownMethods[1] { // SCRAM-SHA256
+		s.hashFunc = sha256.New
+		s.hashSize = 32
+	} else if method == knownMethods[2] { // SCRAM-SHA1
+		s.hashFunc = sha1.New
+		s.hashSize = 20
+	} else {
+		return nil, errors.Errorf("Unsupported method %s", method)
+	}
+	return s, nil
+}
+
+// GetStartRequest builds start SCRAM-SHA request to be sent to server
+func (s *ScramSha) GetStartRequest(user string) (string, error) {
+	var err error
+	s.clientNonce, err = generateClientNonce(24)
+	if err != nil {
+		return "", errors.Wrapf(err, "Unable to generate SCRAM-SHA "+
+			"start request for user %s", user)
+	}
+
+	message := fmt.Sprintf("n,,n=%s,r=%s", user, s.clientNonce)
+	s.authMessage = message[3:] // auth message excludes the "n,," GS2 header
+	return message, nil
+}
+
+// HandleStartResponse handles server response on start SCRAM-SHA request
+func (s *ScramSha) HandleStartResponse(response string) error {
+	parts := strings.Split(response, ",") // expected: r=<nonce>,s=<salt>,i=<iterations>
+	if len(parts) != 3 {
+		return errors.Errorf("expected 3 fields in first SCRAM-SHA-1 "+
+			"server message %s", response)
+	}
+	if !strings.HasPrefix(parts[0], "r=") || len(parts[0]) < 3 {
+		return errors.Errorf("Server sent an invalid nonce %s",
+			parts[0])
+	}
+	if !strings.HasPrefix(parts[1], "s=") || len(parts[1]) < 3 {
+		return errors.Errorf("Server sent an invalid salt %s", parts[1])
+	}
+	if !strings.HasPrefix(parts[2], "i=") || len(parts[2]) < 3 {
+		return errors.Errorf("Server sent an invalid iteration count %s",
+			parts[2])
+	}
+
+	s.serverNonce = parts[0][2:]
+	encodedSalt := parts[1][2:]
+	var err error
+	s.i, err = strconv.Atoi(parts[2][2:])
+	if err != nil {
+		return errors.Errorf("Iteration count %s must be integer.",
+			parts[2][2:])
+	}
+
+	if s.i < 1 {
+		return errors.New("Iteration count should be positive")
+	}
+
+	if !strings.HasPrefix(s.serverNonce, s.clientNonce) { // RFC 5802: server nonce must extend the client nonce
+		return errors.Errorf("Server nonce %s doesn't contain client"+
+			" nonce %s", s.serverNonce, s.clientNonce)
+	}
+
+	s.salt, err = base64.StdEncoding.DecodeString(encodedSalt)
+	if err != nil {
+		return errors.Wrapf(err, "Unable to decode salt %s",
+			encodedSalt)
+	}
+
+	s.authMessage = s.authMessage + "," + response
+	return nil
+}
+
+// GetFinalRequest builds final SCRAM-SHA request to be sent to server
+func (s *ScramSha) GetFinalRequest(pass string) string {
+	clientFinalMessageBare := "c=biws,r=" + s.serverNonce // "biws" = base64("n,,"), the channel-binding header
+	s.authMessage = s.authMessage + "," + clientFinalMessageBare
+
+	s.saltedPassword = pbkdf2.Key([]byte(pass), s.salt, s.i,
+		s.hashSize, s.hashFunc)
+
+	clientKey := hmacHash([]byte("Client Key"), s.saltedPassword, s.hashFunc)
+	storedKey := shaHash(clientKey, s.hashFunc)
+	clientSignature := hmacHash([]byte(s.authMessage), storedKey, s.hashFunc)
+
+	clientProof := make([]byte, len(clientSignature))
+	for i := 0; i < len(clientSignature); i++ { // proof = ClientKey XOR ClientSignature (RFC 5802)
+		clientProof[i] = clientKey[i] ^ clientSignature[i]
+	}
+
+	return clientFinalMessageBare + ",p=" +
+		base64.StdEncoding.EncodeToString(clientProof)
+}
+
+// HandleFinalResponse handles server's response on final SCRAM-SHA request.
+// It verifies the server signature ("v=..."), proving the server also knows
+// the salted password.
+func (s *ScramSha) HandleFinalResponse(response string) error {
+	if strings.Contains(response, ",") ||
+		!strings.HasPrefix(response, "v=") {
+		return errors.Errorf("Server sent an invalid final message %s",
+			response)
+	}
+
+	decodedMessage, err := base64.StdEncoding.DecodeString(response[2:])
+	if err != nil {
+		return errors.Wrapf(err, "Unable to decode server message %s",
+			response[2:])
+	}
+	serverKey := hmacHash([]byte("Server Key"), s.saltedPassword,
+		s.hashFunc)
+	serverSignature := hmacHash([]byte(s.authMessage), serverKey,
+		s.hashFunc)
+	if !hmac.Equal(decodedMessage, serverSignature) { // constant-time compare; avoids the timing leak of string !=
+		return errors.Errorf("Server proof %s doesn't match "+
+			"the expected: %s",
+			string(decodedMessage), string(serverSignature))
+	}
+	return nil
+}
diff --git a/vendor/github.com/couchbase/goutils/scramsha/scramsha_http.go b/vendor/github.com/couchbase/goutils/scramsha/scramsha_http.go
new file mode 100644
index 0000000..19f32b3
--- /dev/null
+++ b/vendor/github.com/couchbase/goutils/scramsha/scramsha_http.go
@@ -0,0 +1,252 @@
+// @author Couchbase
+// @copyright 2018 Couchbase, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package scramsha provides implementation of client side SCRAM-SHA
+// via Http according to https://tools.ietf.org/html/rfc7804
+package scramsha
+
+import (
+ "encoding/base64"
+ "github.com/pkg/errors"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+)
+
+// consts used to parse scramsha response from target
+const (
+ WWWAuthenticate = "WWW-Authenticate"
+ AuthenticationInfo = "Authentication-Info"
+ Authorization = "Authorization"
+ DataPrefix = "data="
+ SidPrefix = "sid="
+)
+
+// Request provides implementation of http request that can be retried
+type Request struct {
+ body io.ReadSeeker
+
+ // Embed an HTTP request directly. This makes a *Request act exactly
+ // like an *http.Request so that all meta methods are supported.
+ *http.Request
+}
+
+type lenReader interface {
+ Len() int
+}
+
+// NewRequest creates http request that can be retried
+func NewRequest(method, url string, body io.ReadSeeker) (*Request, error) {
+ // Wrap the body in a noop ReadCloser if non-nil. This prevents the
+ // reader from being closed by the HTTP client.
+ var rcBody io.ReadCloser
+ if body != nil {
+ rcBody = ioutil.NopCloser(body)
+ }
+
+ // Make the request with the noop-closer for the body.
+ httpReq, err := http.NewRequest(method, url, rcBody)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check if we can set the Content-Length automatically.
+ if lr, ok := body.(lenReader); ok {
+ httpReq.ContentLength = int64(lr.Len())
+ }
+
+ return &Request{body, httpReq}, nil
+}
+
// encode returns the standard base64 encoding of str.
func encode(str string) string {
	raw := []byte(str)
	return base64.StdEncoding.EncodeToString(raw)
}
+
+func decode(str string) (string, error) {
+ bytes, err := base64.StdEncoding.DecodeString(str)
+ if err != nil {
+ return "", errors.Errorf("Cannot base64 decode %s",
+ str)
+ }
+ return string(bytes), err
+}
+
+func trimPrefix(s, prefix string) (string, error) {
+ l := len(s)
+ trimmed := strings.TrimPrefix(s, prefix)
+ if l == len(trimmed) {
+ return trimmed, errors.Errorf("Prefix %s not found in %s",
+ prefix, s)
+ }
+ return trimmed, nil
+}
+
+func drainBody(resp *http.Response) {
+ defer resp.Body.Close()
+ io.Copy(ioutil.Discard, resp.Body)
+}
+
+// DoScramSha performs SCRAM-SHA handshake via Http
+func DoScramSha(req *Request,
+ username string,
+ password string,
+ client *http.Client) (*http.Response, error) {
+
+ method := "SCRAM-SHA-512"
+ s, err := NewScramSha("SCRAM-SHA512")
+ if err != nil {
+ return nil, errors.Wrap(err,
+ "Unable to initialize SCRAM-SHA handler")
+ }
+
+ message, err := s.GetStartRequest(username)
+ if err != nil {
+ return nil, err
+ }
+
+ encodedMessage := method + " " + DataPrefix + encode(message)
+
+ req.Header.Set(Authorization, encodedMessage)
+
+ res, err := client.Do(req.Request)
+ if err != nil {
+ return nil, errors.Wrap(err, "Problem sending SCRAM-SHA start"+
+ "request")
+ }
+
+ if res.StatusCode != http.StatusUnauthorized {
+ return res, nil
+ }
+
+ authHeader := res.Header.Get(WWWAuthenticate)
+ if authHeader == "" {
+ drainBody(res)
+ return nil, errors.Errorf("Header %s is not populated in "+
+ "SCRAM-SHA start response", WWWAuthenticate)
+ }
+
+ authHeader, err = trimPrefix(authHeader, method+" ")
+ if err != nil {
+ if strings.HasPrefix(authHeader, "Basic ") {
+ // user not found
+ return res, nil
+ }
+ drainBody(res)
+ return nil, errors.Wrapf(err, "Error while parsing SCRAM-SHA "+
+ "start response %s", authHeader)
+ }
+
+ drainBody(res)
+
+ sid, response, err := parseSidAndData(authHeader)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error while parsing SCRAM-SHA "+
+ "start response %s", authHeader)
+ }
+
+ err = s.HandleStartResponse(response)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error parsing SCRAM-SHA start "+
+ "response %s", response)
+ }
+
+ message = s.GetFinalRequest(password)
+ encodedMessage = method + " " + SidPrefix + sid + "," + DataPrefix +
+ encode(message)
+
+ req.Header.Set(Authorization, encodedMessage)
+
+ // rewind request body so it can be resent again
+ if req.body != nil {
+ if _, err = req.body.Seek(0, 0); err != nil {
+ return nil, errors.Errorf("Failed to seek body: %v",
+ err)
+ }
+ }
+
+ res, err = client.Do(req.Request)
+ if err != nil {
+ return nil, errors.Wrap(err, "Problem sending SCRAM-SHA final"+
+ "request")
+ }
+
+ if res.StatusCode == http.StatusUnauthorized {
+ // TODO retrieve and return error
+ return res, nil
+ }
+
+ if res.StatusCode >= http.StatusInternalServerError {
+ // in this case we cannot expect server to set headers properly
+ return res, nil
+ }
+
+ authHeader = res.Header.Get(AuthenticationInfo)
+ if authHeader == "" {
+ drainBody(res)
+ return nil, errors.Errorf("Header %s is not populated in "+
+ "SCRAM-SHA final response", AuthenticationInfo)
+ }
+
+ finalSid, response, err := parseSidAndData(authHeader)
+ if err != nil {
+ drainBody(res)
+ return nil, errors.Wrapf(err, "Error while parsing SCRAM-SHA "+
+ "final response %s", authHeader)
+ }
+
+ if finalSid != sid {
+ drainBody(res)
+ return nil, errors.Errorf("Sid %s returned by server "+
+ "doesn't match the original sid %s", finalSid, sid)
+ }
+
+ err = s.HandleFinalResponse(response)
+ if err != nil {
+ drainBody(res)
+ return nil, errors.Wrapf(err,
+ "Error handling SCRAM-SHA final server response %s",
+ response)
+ }
+ return res, nil
+}
+
+func parseSidAndData(authHeader string) (string, string, error) {
+ sidIndex := strings.Index(authHeader, SidPrefix)
+ if sidIndex < 0 {
+ return "", "", errors.Errorf("Cannot find %s in %s",
+ SidPrefix, authHeader)
+ }
+
+ sidEndIndex := strings.Index(authHeader, ",")
+ if sidEndIndex < 0 {
+ return "", "", errors.Errorf("Cannot find ',' in %s",
+ authHeader)
+ }
+
+ sid := authHeader[sidIndex+len(SidPrefix) : sidEndIndex]
+
+ dataIndex := strings.Index(authHeader, DataPrefix)
+ if dataIndex < 0 {
+ return "", "", errors.Errorf("Cannot find %s in %s",
+ DataPrefix, authHeader)
+ }
+
+ data, err := decode(authHeader[dataIndex+len(DataPrefix):])
+ if err != nil {
+ return "", "", err
+ }
+ return sid, data, nil
+}
diff --git a/vendor/github.com/cupcake/rdb/.gitignore b/vendor/github.com/cupcake/rdb/.gitignore
new file mode 100644
index 0000000..fcc1e66
--- /dev/null
+++ b/vendor/github.com/cupcake/rdb/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+# Project-specific files
+diff
diff --git a/vendor/github.com/cupcake/rdb/.travis.yml b/vendor/github.com/cupcake/rdb/.travis.yml
new file mode 100644
index 0000000..49c6fb8
--- /dev/null
+++ b/vendor/github.com/cupcake/rdb/.travis.yml
@@ -0,0 +1,6 @@
+language: go
+go:
+ - 1.1
+ - tip
+before_install:
+ - go get gopkg.in/check.v1
diff --git a/vendor/github.com/cupcake/rdb/LICENCE b/vendor/github.com/cupcake/rdb/LICENCE
new file mode 100644
index 0000000..5025790
--- /dev/null
+++ b/vendor/github.com/cupcake/rdb/LICENCE
@@ -0,0 +1,21 @@
+Copyright (c) 2012 Jonathan Rudenberg
+Copyright (c) 2012 Sripathi Krishnan
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cupcake/rdb/README.md b/vendor/github.com/cupcake/rdb/README.md
new file mode 100644
index 0000000..5c19212
--- /dev/null
+++ b/vendor/github.com/cupcake/rdb/README.md
@@ -0,0 +1,17 @@
+# rdb [![Build Status](https://travis-ci.org/cupcake/rdb.png?branch=master)](https://travis-ci.org/cupcake/rdb)
+
+rdb is a Go package that implements parsing and encoding of the
+[Redis](http://redis.io) [RDB file
+format](https://github.com/sripathikrishnan/redis-rdb-tools/blob/master/docs/RDB_File_Format.textile).
+
+This package was heavily inspired by
+[redis-rdb-tools](https://github.com/sripathikrishnan/redis-rdb-tools) by
+[Sripathi Krishnan](https://github.com/sripathikrishnan).
+
+[**Documentation**](http://godoc.org/github.com/cupcake/rdb)
+
+## Installation
+
+```
+go get github.com/cupcake/rdb
+```
diff --git a/vendor/github.com/cupcake/rdb/crc64/crc64.go b/vendor/github.com/cupcake/rdb/crc64/crc64.go
new file mode 100644
index 0000000..54fed9c
--- /dev/null
+++ b/vendor/github.com/cupcake/rdb/crc64/crc64.go
@@ -0,0 +1,64 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package crc64 implements the Jones coefficients with an init value of 0.
+package crc64
+
+import "hash"
+
+// Redis uses the CRC64 variant with "Jones" coefficients and init value of 0.
+//
+// Specification of this CRC64 variant follows:
+// Name: crc-64-jones
+// Width: 64 bits
+// Poly: 0xad93d23594c935a9
+// Reflected In: True
+// Xor_In: 0xffffffffffffffff
+// Reflected_Out: True
+// Xor_Out: 0x0
+
+var table = [256]uint64{0x0000000000000000, 0x7ad870c830358979, 0xf5b0e190606b12f2, 0x8f689158505e9b8b, 0xc038e5739841b68f, 0xbae095bba8743ff6, 0x358804e3f82aa47d, 0x4f50742bc81f2d04, 0xab28ecb46814fe75, 0xd1f09c7c5821770c, 0x5e980d24087fec87, 0x24407dec384a65fe, 0x6b1009c7f05548fa, 0x11c8790fc060c183, 0x9ea0e857903e5a08, 0xe478989fa00bd371, 0x7d08ff3b88be6f81, 0x07d08ff3b88be6f8, 0x88b81eabe8d57d73, 0xf2606e63d8e0f40a, 0xbd301a4810ffd90e, 0xc7e86a8020ca5077, 0x4880fbd87094cbfc, 0x32588b1040a14285, 0xd620138fe0aa91f4, 0xacf86347d09f188d, 0x2390f21f80c18306, 0x594882d7b0f40a7f, 0x1618f6fc78eb277b, 0x6cc0863448deae02, 0xe3a8176c18803589, 0x997067a428b5bcf0, 0xfa11fe77117cdf02, 0x80c98ebf2149567b, 0x0fa11fe77117cdf0, 0x75796f2f41224489, 0x3a291b04893d698d, 0x40f16bccb908e0f4, 0xcf99fa94e9567b7f, 0xb5418a5cd963f206, 0x513912c379682177, 0x2be1620b495da80e, 0xa489f35319033385, 0xde51839b2936bafc, 0x9101f7b0e12997f8, 0xebd98778d11c1e81, 0x64b116208142850a, 0x1e6966e8b1770c73, 0x8719014c99c2b083, 0xfdc17184a9f739fa, 0x72a9e0dcf9a9a271, 0x08719014c99c2b08, 0x4721e43f0183060c, 0x3df994f731b68f75, 0xb29105af61e814fe, 0xc849756751dd9d87, 0x2c31edf8f1d64ef6, 0x56e99d30c1e3c78f, 0xd9810c6891bd5c04, 0xa3597ca0a188d57d, 0xec09088b6997f879, 0x96d1784359a27100, 0x19b9e91b09fcea8b, 0x636199d339c963f2, 0xdf7adabd7a6e2d6f, 0xa5a2aa754a5ba416, 0x2aca3b2d1a053f9d, 0x50124be52a30b6e4, 0x1f423fcee22f9be0, 0x659a4f06d21a1299, 0xeaf2de5e82448912, 0x902aae96b271006b, 0x74523609127ad31a, 0x0e8a46c1224f5a63, 0x81e2d7997211c1e8, 0xfb3aa75142244891, 0xb46ad37a8a3b6595, 0xceb2a3b2ba0eecec, 0x41da32eaea507767, 0x3b024222da65fe1e, 0xa2722586f2d042ee, 0xd8aa554ec2e5cb97, 0x57c2c41692bb501c, 0x2d1ab4dea28ed965, 0x624ac0f56a91f461, 0x1892b03d5aa47d18, 0x97fa21650afae693, 0xed2251ad3acf6fea, 0x095ac9329ac4bc9b, 0x7382b9faaaf135e2, 0xfcea28a2faafae69, 0x8632586aca9a2710, 0xc9622c4102850a14, 0xb3ba5c8932b0836d, 0x3cd2cdd162ee18e6, 0x460abd1952db919f, 0x256b24ca6b12f26d, 0x5fb354025b277b14, 
0xd0dbc55a0b79e09f, 0xaa03b5923b4c69e6, 0xe553c1b9f35344e2, 0x9f8bb171c366cd9b, 0x10e3202993385610, 0x6a3b50e1a30ddf69, 0x8e43c87e03060c18, 0xf49bb8b633338561, 0x7bf329ee636d1eea, 0x012b592653589793, 0x4e7b2d0d9b47ba97, 0x34a35dc5ab7233ee, 0xbbcbcc9dfb2ca865, 0xc113bc55cb19211c, 0x5863dbf1e3ac9dec, 0x22bbab39d3991495, 0xadd33a6183c78f1e, 0xd70b4aa9b3f20667, 0x985b3e827bed2b63, 0xe2834e4a4bd8a21a, 0x6debdf121b863991, 0x1733afda2bb3b0e8, 0xf34b37458bb86399, 0x8993478dbb8deae0, 0x06fbd6d5ebd3716b, 0x7c23a61ddbe6f812, 0x3373d23613f9d516, 0x49aba2fe23cc5c6f, 0xc6c333a67392c7e4, 0xbc1b436e43a74e9d, 0x95ac9329ac4bc9b5, 0xef74e3e19c7e40cc, 0x601c72b9cc20db47, 0x1ac40271fc15523e, 0x5594765a340a7f3a, 0x2f4c0692043ff643, 0xa02497ca54616dc8, 0xdafce7026454e4b1, 0x3e847f9dc45f37c0, 0x445c0f55f46abeb9, 0xcb349e0da4342532, 0xb1eceec59401ac4b, 0xfebc9aee5c1e814f, 0x8464ea266c2b0836, 0x0b0c7b7e3c7593bd, 0x71d40bb60c401ac4, 0xe8a46c1224f5a634, 0x927c1cda14c02f4d, 0x1d148d82449eb4c6, 0x67ccfd4a74ab3dbf, 0x289c8961bcb410bb, 0x5244f9a98c8199c2, 0xdd2c68f1dcdf0249, 0xa7f41839ecea8b30, 0x438c80a64ce15841, 0x3954f06e7cd4d138, 0xb63c61362c8a4ab3, 0xcce411fe1cbfc3ca, 0x83b465d5d4a0eece, 0xf96c151de49567b7, 0x76048445b4cbfc3c, 0x0cdcf48d84fe7545, 0x6fbd6d5ebd3716b7, 0x15651d968d029fce, 0x9a0d8ccedd5c0445, 0xe0d5fc06ed698d3c, 0xaf85882d2576a038, 0xd55df8e515432941, 0x5a3569bd451db2ca, 0x20ed197575283bb3, 0xc49581ead523e8c2, 0xbe4df122e51661bb, 0x3125607ab548fa30, 0x4bfd10b2857d7349, 0x04ad64994d625e4d, 0x7e7514517d57d734, 0xf11d85092d094cbf, 0x8bc5f5c11d3cc5c6, 0x12b5926535897936, 0x686de2ad05bcf04f, 0xe70573f555e26bc4, 0x9ddd033d65d7e2bd, 0xd28d7716adc8cfb9, 0xa85507de9dfd46c0, 0x273d9686cda3dd4b, 0x5de5e64efd965432, 0xb99d7ed15d9d8743, 0xc3450e196da80e3a, 0x4c2d9f413df695b1, 0x36f5ef890dc31cc8, 0x79a59ba2c5dc31cc, 0x037deb6af5e9b8b5, 0x8c157a32a5b7233e, 0xf6cd0afa9582aa47, 0x4ad64994d625e4da, 0x300e395ce6106da3, 0xbf66a804b64ef628, 0xc5bed8cc867b7f51, 0x8aeeace74e645255, 0xf036dc2f7e51db2c, 
0x7f5e4d772e0f40a7, 0x05863dbf1e3ac9de, 0xe1fea520be311aaf, 0x9b26d5e88e0493d6, 0x144e44b0de5a085d, 0x6e963478ee6f8124, 0x21c640532670ac20, 0x5b1e309b16452559, 0xd476a1c3461bbed2, 0xaeaed10b762e37ab, 0x37deb6af5e9b8b5b, 0x4d06c6676eae0222, 0xc26e573f3ef099a9, 0xb8b627f70ec510d0, 0xf7e653dcc6da3dd4, 0x8d3e2314f6efb4ad, 0x0256b24ca6b12f26, 0x788ec2849684a65f, 0x9cf65a1b368f752e, 0xe62e2ad306bafc57, 0x6946bb8b56e467dc, 0x139ecb4366d1eea5, 0x5ccebf68aecec3a1, 0x2616cfa09efb4ad8, 0xa97e5ef8cea5d153, 0xd3a62e30fe90582a, 0xb0c7b7e3c7593bd8, 0xca1fc72bf76cb2a1, 0x45775673a732292a, 0x3faf26bb9707a053, 0x70ff52905f188d57, 0x0a2722586f2d042e, 0x854fb3003f739fa5, 0xff97c3c80f4616dc, 0x1bef5b57af4dc5ad, 0x61372b9f9f784cd4, 0xee5fbac7cf26d75f, 0x9487ca0fff135e26, 0xdbd7be24370c7322, 0xa10fceec0739fa5b, 0x2e675fb4576761d0, 0x54bf2f7c6752e8a9, 0xcdcf48d84fe75459, 0xb71738107fd2dd20, 0x387fa9482f8c46ab, 0x42a7d9801fb9cfd2, 0x0df7adabd7a6e2d6, 0x772fdd63e7936baf, 0xf8474c3bb7cdf024, 0x829f3cf387f8795d, 0x66e7a46c27f3aa2c, 0x1c3fd4a417c62355, 0x935745fc4798b8de, 0xe98f353477ad31a7, 0xa6df411fbfb21ca3, 0xdc0731d78f8795da, 0x536fa08fdfd90e51, 0x29b7d047efec8728}
+
// crc64 folds the bytes of b into crc using the reflected,
// table-driven algorithm: one table lookup per input byte.
func crc64(crc uint64, b []byte) uint64 {
	for _, v := range b {
		crc = table[byte(crc)^v] ^ (crc >> 8)
	}
	return crc
}
+
// Digest returns the CRC-64 checksum of b using the Jones
// coefficients with an init value of 0 (the variant Redis uses).
func Digest(b []byte) uint64 {
	return crc64(0, b)
}
+
// digest is a running CRC-64 state implementing hash.Hash64.
type digest struct {
	crc uint64
}

// New returns a hash.Hash64 computing the Jones CRC-64 with an init
// value of 0.
func New() hash.Hash64 {
	return &digest{}
}

// Write folds p into the running checksum; it never returns an error.
func (h *digest) Write(p []byte) (int, error) {
	h.crc = crc64(h.crc, p)
	return len(p), nil
}

// Encode in little endian
// (the byte order used for the CRC trailer in Redis RDB/DUMP payloads).
func (d *digest) Sum(in []byte) []byte {
	s := d.Sum64()
	in = append(in, byte(s))
	in = append(in, byte(s>>8))
	in = append(in, byte(s>>16))
	in = append(in, byte(s>>24))
	in = append(in, byte(s>>32))
	in = append(in, byte(s>>40))
	in = append(in, byte(s>>48))
	in = append(in, byte(s>>56))
	return in
}

// Remaining hash.Hash64 methods: current value, block size, output
// size in bytes, and reset to the zero (init) state.
func (d *digest) Sum64() uint64 { return d.crc }
func (d *digest) BlockSize() int { return 1 }
func (d *digest) Size() int { return 8 }
func (d *digest) Reset() { d.crc = 0 }
diff --git a/vendor/github.com/cupcake/rdb/decoder.go b/vendor/github.com/cupcake/rdb/decoder.go
new file mode 100644
index 0000000..dd3993b
--- /dev/null
+++ b/vendor/github.com/cupcake/rdb/decoder.go
@@ -0,0 +1,824 @@
+// Package rdb implements parsing and encoding of the Redis RDB file format.
+package rdb
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+
+ "github.com/cupcake/rdb/crc64"
+)
+
+// A Decoder must be implemented to parse a RDB file.
+type Decoder interface {
+ // StartRDB is called when parsing of a valid RDB file starts.
+ StartRDB()
+ // StartDatabase is called when database n starts.
+ // Once a database starts, another database will not start until EndDatabase is called.
+ StartDatabase(n int)
+ // AUX field
+ Aux(key, value []byte)
+ // ResizeDB hint
+ ResizeDatabase(dbSize, expiresSize uint32)
+ // Set is called once for each string key.
+ Set(key, value []byte, expiry int64)
+ // StartHash is called at the beginning of a hash.
+ // Hset will be called exactly length times before EndHash.
+ StartHash(key []byte, length, expiry int64)
+ // Hset is called once for each field=value pair in a hash.
+ Hset(key, field, value []byte)
+ // EndHash is called when there are no more fields in a hash.
+ EndHash(key []byte)
+ // StartSet is called at the beginning of a set.
+ // Sadd will be called exactly cardinality times before EndSet.
+ StartSet(key []byte, cardinality, expiry int64)
+ // Sadd is called once for each member of a set.
+ Sadd(key, member []byte)
+ // EndSet is called when there are no more fields in a set.
+ EndSet(key []byte)
+ // StartList is called at the beginning of a list.
+ // Rpush will be called exactly length times before EndList.
+ // If length of the list is not known, then length is -1
+ StartList(key []byte, length, expiry int64)
+ // Rpush is called once for each value in a list.
+ Rpush(key, value []byte)
+ // EndList is called when there are no more values in a list.
+ EndList(key []byte)
+ // StartZSet is called at the beginning of a sorted set.
+ // Zadd will be called exactly cardinality times before EndZSet.
+ StartZSet(key []byte, cardinality, expiry int64)
+ // Zadd is called once for each member of a sorted set.
+ Zadd(key []byte, score float64, member []byte)
+ // EndZSet is called when there are no more members in a sorted set.
+ EndZSet(key []byte)
+ // EndDatabase is called at the end of a database.
+ EndDatabase(n int)
+ // EndRDB is called when parsing of the RDB file is complete.
+ EndRDB()
+}
+
// Decode parses a RDB file from r and calls the decode hooks on d.
// The reader is wrapped in a bufio.Reader; the 8-byte slice is a
// scratch buffer reused for fixed-width integer reads.
func Decode(r io.Reader, d Decoder) error {
	decoder := &decode{d, make([]byte, 8), bufio.NewReader(r)}
	return decoder.decode()
}
+
// DecodeDump decodes a byte slice from the Redis DUMP command. The
// dump does not contain the database, key or expiry, so they must be
// included in the function call (but can be zero values). dump[0] is
// the value-type byte; the payload follows. verifyDump is presumed to
// check the version/CRC trailer — confirm in its definition.
func DecodeDump(dump []byte, db int, key []byte, expiry int64, d Decoder) error {
	err := verifyDump(dump)
	if err != nil {
		return err
	}

	decoder := &decode{d, make([]byte, 8), bytes.NewReader(dump[1:])}
	decoder.event.StartRDB()
	decoder.event.StartDatabase(db)

	err = decoder.readObject(key, ValueType(dump[0]), expiry)

	// End events fire even when readObject failed; the error is still
	// returned to the caller.
	decoder.event.EndDatabase(db)
	decoder.event.EndRDB()
	return err
}
+
+type byteReader interface {
+ io.Reader
+ io.ByteReader
+}
+
+type decode struct {
+ event Decoder
+ intBuf []byte
+ r byteReader
+}
+
+type ValueType byte
+
+const (
+ TypeString ValueType = 0
+ TypeList ValueType = 1
+ TypeSet ValueType = 2
+ TypeZSet ValueType = 3
+ TypeHash ValueType = 4
+
+ TypeHashZipmap ValueType = 9
+ TypeListZiplist ValueType = 10
+ TypeSetIntset ValueType = 11
+ TypeZSetZiplist ValueType = 12
+ TypeHashZiplist ValueType = 13
+ TypeListQuicklist ValueType = 14
+)
+
+const (
+ rdb6bitLen = 0
+ rdb14bitLen = 1
+ rdb32bitLen = 2
+ rdbEncVal = 3
+
+ rdbFlagAux = 0xfa
+ rdbFlagResizeDB = 0xfb
+ rdbFlagExpiryMS = 0xfc
+ rdbFlagExpiry = 0xfd
+ rdbFlagSelectDB = 0xfe
+ rdbFlagEOF = 0xff
+
+ rdbEncInt8 = 0
+ rdbEncInt16 = 1
+ rdbEncInt32 = 2
+ rdbEncLZF = 3
+
+ rdbZiplist6bitlenString = 0
+ rdbZiplist14bitlenString = 1
+ rdbZiplist32bitlenString = 2
+
+ rdbZiplistInt16 = 0xc0
+ rdbZiplistInt32 = 0xd0
+ rdbZiplistInt64 = 0xe0
+ rdbZiplistInt24 = 0xf0
+ rdbZiplistInt8 = 0xfe
+ rdbZiplistInt4 = 15
+)
+
+func (d *decode) decode() error {
+ err := d.checkHeader()
+ if err != nil {
+ return err
+ }
+
+ d.event.StartRDB()
+
+ var db uint32
+ var expiry int64
+ firstDB := true
+ for {
+ objType, err := d.r.ReadByte()
+ if err != nil {
+ return err
+ }
+ switch objType {
+ case rdbFlagAux:
+ auxKey, err := d.readString()
+ if err != nil {
+ return err
+ }
+ auxVal, err := d.readString()
+ if err != nil {
+ return err
+ }
+ d.event.Aux(auxKey, auxVal)
+ case rdbFlagResizeDB:
+ dbSize, _, err := d.readLength()
+ if err != nil {
+ return err
+ }
+ expiresSize, _, err := d.readLength()
+ if err != nil {
+ return err
+ }
+ d.event.ResizeDatabase(dbSize, expiresSize)
+ case rdbFlagExpiryMS:
+ _, err := io.ReadFull(d.r, d.intBuf)
+ if err != nil {
+ return err
+ }
+ expiry = int64(binary.LittleEndian.Uint64(d.intBuf))
+ case rdbFlagExpiry:
+ _, err := io.ReadFull(d.r, d.intBuf[:4])
+ if err != nil {
+ return err
+ }
+ expiry = int64(binary.LittleEndian.Uint32(d.intBuf)) * 1000
+ case rdbFlagSelectDB:
+ if !firstDB {
+ d.event.EndDatabase(int(db))
+ }
+ db, _, err = d.readLength()
+ if err != nil {
+ return err
+ }
+ d.event.StartDatabase(int(db))
+ case rdbFlagEOF:
+ d.event.EndDatabase(int(db))
+ d.event.EndRDB()
+ return nil
+ default:
+ key, err := d.readString()
+ if err != nil {
+ return err
+ }
+ err = d.readObject(key, ValueType(objType), expiry)
+ if err != nil {
+ return err
+ }
+ expiry = 0
+ }
+ }
+
+ panic("not reached")
+}
+
+func (d *decode) readObject(key []byte, typ ValueType, expiry int64) error {
+ switch typ {
+ case TypeString:
+ value, err := d.readString()
+ if err != nil {
+ return err
+ }
+ d.event.Set(key, value, expiry)
+ case TypeList:
+ length, _, err := d.readLength()
+ if err != nil {
+ return err
+ }
+ d.event.StartList(key, int64(length), expiry)
+ for i := uint32(0); i < length; i++ {
+ value, err := d.readString()
+ if err != nil {
+ return err
+ }
+ d.event.Rpush(key, value)
+ }
+ d.event.EndList(key)
+ case TypeListQuicklist:
+ length, _, err := d.readLength()
+ if err != nil {
+ return err
+ }
+ d.event.StartList(key, int64(-1), expiry)
+ for i := uint32(0); i < length; i++ {
+ d.readZiplist(key, 0, false)
+ }
+ d.event.EndList(key)
+ case TypeSet:
+ cardinality, _, err := d.readLength()
+ if err != nil {
+ return err
+ }
+ d.event.StartSet(key, int64(cardinality), expiry)
+ for i := uint32(0); i < cardinality; i++ {
+ member, err := d.readString()
+ if err != nil {
+ return err
+ }
+ d.event.Sadd(key, member)
+ }
+ d.event.EndSet(key)
+ case TypeZSet:
+ cardinality, _, err := d.readLength()
+ if err != nil {
+ return err
+ }
+ d.event.StartZSet(key, int64(cardinality), expiry)
+ for i := uint32(0); i < cardinality; i++ {
+ member, err := d.readString()
+ if err != nil {
+ return err
+ }
+ score, err := d.readFloat64()
+ if err != nil {
+ return err
+ }
+ d.event.Zadd(key, score, member)
+ }
+ d.event.EndZSet(key)
+ case TypeHash:
+ length, _, err := d.readLength()
+ if err != nil {
+ return err
+ }
+ d.event.StartHash(key, int64(length), expiry)
+ for i := uint32(0); i < length; i++ {
+ field, err := d.readString()
+ if err != nil {
+ return err
+ }
+ value, err := d.readString()
+ if err != nil {
+ return err
+ }
+ d.event.Hset(key, field, value)
+ }
+ d.event.EndHash(key)
+ case TypeHashZipmap:
+ return d.readZipmap(key, expiry)
+ case TypeListZiplist:
+ return d.readZiplist(key, expiry, true)
+ case TypeSetIntset:
+ return d.readIntset(key, expiry)
+ case TypeZSetZiplist:
+ return d.readZiplistZset(key, expiry)
+ case TypeHashZiplist:
+ return d.readZiplistHash(key, expiry)
+ default:
+ return fmt.Errorf("rdb: unknown object type %d for key %s", typ, key)
+ }
+ return nil
+}
+
+func (d *decode) readZipmap(key []byte, expiry int64) error {
+ var length int
+ zipmap, err := d.readString()
+ if err != nil {
+ return err
+ }
+ buf := newSliceBuffer(zipmap)
+ lenByte, err := buf.ReadByte()
+ if err != nil {
+ return err
+ }
+ if lenByte >= 254 { // we need to count the items manually
+ length, err = countZipmapItems(buf)
+ length /= 2
+ if err != nil {
+ return err
+ }
+ } else {
+ length = int(lenByte)
+ }
+ d.event.StartHash(key, int64(length), expiry)
+ for i := 0; i < length; i++ {
+ field, err := readZipmapItem(buf, false)
+ if err != nil {
+ return err
+ }
+ value, err := readZipmapItem(buf, true)
+ if err != nil {
+ return err
+ }
+ d.event.Hset(key, field, value)
+ }
+ d.event.EndHash(key)
+ return nil
+}
+
+func readZipmapItem(buf *sliceBuffer, readFree bool) ([]byte, error) {
+ length, free, err := readZipmapItemLength(buf, readFree)
+ if err != nil {
+ return nil, err
+ }
+ if length == -1 {
+ return nil, nil
+ }
+ value, err := buf.Slice(length)
+ if err != nil {
+ return nil, err
+ }
+ _, err = buf.Seek(int64(free), 1)
+ return value, err
+}
+
+func countZipmapItems(buf *sliceBuffer) (int, error) {
+ n := 0
+ for {
+ strLen, free, err := readZipmapItemLength(buf, n%2 != 0)
+ if err != nil {
+ return 0, err
+ }
+ if strLen == -1 {
+ break
+ }
+ _, err = buf.Seek(int64(strLen)+int64(free), 1)
+ if err != nil {
+ return 0, err
+ }
+ n++
+ }
+ _, err := buf.Seek(0, 0)
+ return n, err
+}
+
+func readZipmapItemLength(buf *sliceBuffer, readFree bool) (int, int, error) {
+ b, err := buf.ReadByte()
+ if err != nil {
+ return 0, 0, err
+ }
+ switch b {
+ case 253:
+ s, err := buf.Slice(5)
+ if err != nil {
+ return 0, 0, err
+ }
+ return int(binary.BigEndian.Uint32(s)), int(s[4]), nil
+ case 254:
+ return 0, 0, fmt.Errorf("rdb: invalid zipmap item length")
+ case 255:
+ return -1, 0, nil
+ }
+ var free byte
+ if readFree {
+ free, err = buf.ReadByte()
+ }
+ return int(b), int(free), err
+}
+
+func (d *decode) readZiplist(key []byte, expiry int64, addListEvents bool) error {
+ ziplist, err := d.readString()
+ if err != nil {
+ return err
+ }
+ buf := newSliceBuffer(ziplist)
+ length, err := readZiplistLength(buf)
+ if err != nil {
+ return err
+ }
+ if addListEvents {
+ d.event.StartList(key, length, expiry)
+ }
+ for i := int64(0); i < length; i++ {
+ entry, err := readZiplistEntry(buf)
+ if err != nil {
+ return err
+ }
+ d.event.Rpush(key, entry)
+ }
+ if addListEvents {
+ d.event.EndList(key)
+ }
+ return nil
+}
+
+func (d *decode) readZiplistZset(key []byte, expiry int64) error {
+ ziplist, err := d.readString()
+ if err != nil {
+ return err
+ }
+ buf := newSliceBuffer(ziplist)
+ cardinality, err := readZiplistLength(buf)
+ if err != nil {
+ return err
+ }
+ cardinality /= 2
+ d.event.StartZSet(key, cardinality, expiry)
+ for i := int64(0); i < cardinality; i++ {
+ member, err := readZiplistEntry(buf)
+ if err != nil {
+ return err
+ }
+ scoreBytes, err := readZiplistEntry(buf)
+ if err != nil {
+ return err
+ }
+ score, err := strconv.ParseFloat(string(scoreBytes), 64)
+ if err != nil {
+ return err
+ }
+ d.event.Zadd(key, score, member)
+ }
+ d.event.EndZSet(key)
+ return nil
+}
+
+func (d *decode) readZiplistHash(key []byte, expiry int64) error {
+ ziplist, err := d.readString()
+ if err != nil {
+ return err
+ }
+ buf := newSliceBuffer(ziplist)
+ length, err := readZiplistLength(buf)
+ if err != nil {
+ return err
+ }
+ length /= 2
+ d.event.StartHash(key, length, expiry)
+ for i := int64(0); i < length; i++ {
+ field, err := readZiplistEntry(buf)
+ if err != nil {
+ return err
+ }
+ value, err := readZiplistEntry(buf)
+ if err != nil {
+ return err
+ }
+ d.event.Hset(key, field, value)
+ }
+ d.event.EndHash(key)
+ return nil
+}
+
+func readZiplistLength(buf *sliceBuffer) (int64, error) {
+ buf.Seek(8, 0) // skip the zlbytes and zltail
+ lenBytes, err := buf.Slice(2)
+ if err != nil {
+ return 0, err
+ }
+ return int64(binary.LittleEndian.Uint16(lenBytes)), nil
+}
+
+func readZiplistEntry(buf *sliceBuffer) ([]byte, error) {
+ prevLen, err := buf.ReadByte()
+ if err != nil {
+ return nil, err
+ }
+ if prevLen == 254 {
+ buf.Seek(4, 1) // skip the 4-byte prevlen
+ }
+
+ header, err := buf.ReadByte()
+ if err != nil {
+ return nil, err
+ }
+ switch {
+ case header>>6 == rdbZiplist6bitlenString:
+ return buf.Slice(int(header & 0x3f))
+ case header>>6 == rdbZiplist14bitlenString:
+ b, err := buf.ReadByte()
+ if err != nil {
+ return nil, err
+ }
+ return buf.Slice((int(header&0x3f) << 8) | int(b))
+ case header>>6 == rdbZiplist32bitlenString:
+ lenBytes, err := buf.Slice(4)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Slice(int(binary.BigEndian.Uint32(lenBytes)))
+ case header == rdbZiplistInt16:
+ intBytes, err := buf.Slice(2)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(strconv.FormatInt(int64(int16(binary.LittleEndian.Uint16(intBytes))), 10)), nil
+ case header == rdbZiplistInt32:
+ intBytes, err := buf.Slice(4)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(strconv.FormatInt(int64(int32(binary.LittleEndian.Uint32(intBytes))), 10)), nil
+ case header == rdbZiplistInt64:
+ intBytes, err := buf.Slice(8)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(strconv.FormatInt(int64(binary.LittleEndian.Uint64(intBytes)), 10)), nil
+ case header == rdbZiplistInt24:
+ intBytes := make([]byte, 4)
+ _, err := buf.Read(intBytes[1:])
+ if err != nil {
+ return nil, err
+ }
+ return []byte(strconv.FormatInt(int64(int32(binary.LittleEndian.Uint32(intBytes))>>8), 10)), nil
+ case header == rdbZiplistInt8:
+ b, err := buf.ReadByte()
+ return []byte(strconv.FormatInt(int64(int8(b)), 10)), err
+ case header>>4 == rdbZiplistInt4:
+ return []byte(strconv.FormatInt(int64(header&0x0f)-1, 10)), nil
+ }
+
+ return nil, fmt.Errorf("rdb: unknown ziplist header byte: %d", header)
+}
+
+func (d *decode) readIntset(key []byte, expiry int64) error {
+ intset, err := d.readString()
+ if err != nil {
+ return err
+ }
+ buf := newSliceBuffer(intset)
+ intSizeBytes, err := buf.Slice(4)
+ if err != nil {
+ return err
+ }
+ intSize := binary.LittleEndian.Uint32(intSizeBytes)
+
+ if intSize != 2 && intSize != 4 && intSize != 8 {
+ return fmt.Errorf("rdb: unknown intset encoding: %d", intSize)
+ }
+
+ lenBytes, err := buf.Slice(4)
+ if err != nil {
+ return err
+ }
+ cardinality := binary.LittleEndian.Uint32(lenBytes)
+
+ d.event.StartSet(key, int64(cardinality), expiry)
+ for i := uint32(0); i < cardinality; i++ {
+ intBytes, err := buf.Slice(int(intSize))
+ if err != nil {
+ return err
+ }
+ var intString string
+ switch intSize {
+ case 2:
+ intString = strconv.FormatInt(int64(int16(binary.LittleEndian.Uint16(intBytes))), 10)
+ case 4:
+ intString = strconv.FormatInt(int64(int32(binary.LittleEndian.Uint32(intBytes))), 10)
+ case 8:
+ intString = strconv.FormatInt(int64(int64(binary.LittleEndian.Uint64(intBytes))), 10)
+ }
+ d.event.Sadd(key, []byte(intString))
+ }
+ d.event.EndSet(key)
+ return nil
+}
+
// checkHeader consumes and validates the 9-byte RDB preamble: the
// magic "REDIS" followed by a 4-character version number. Versions
// outside 1..7 are rejected (this parser predates later RDB versions).
func (d *decode) checkHeader() error {
	header := make([]byte, 9)
	_, err := io.ReadFull(d.r, header)
	if err != nil {
		return err
	}

	if !bytes.Equal(header[:5], []byte("REDIS")) {
		return fmt.Errorf("rdb: invalid file format")
	}

	// A ParseInt failure leaves version 0, which the range check
	// below rejects, so the error is deliberately ignored.
	version, _ := strconv.ParseInt(string(header[5:]), 10, 64)
	if version < 1 || version > 7 {
		return fmt.Errorf("rdb: invalid RDB version number %d", version)
	}

	return nil
}
+
+// readString reads one RDB string value. A string is either raw bytes
+// prefixed by a length, or a special encoding: an 8/16/32-bit integer
+// (returned here as its decimal ASCII form) or an LZF-compressed blob.
+func (d *decode) readString() ([]byte, error) {
+ length, encoded, err := d.readLength()
+ if err != nil {
+ return nil, err
+ }
+ if encoded {
+ // For encoded strings, "length" is actually the encoding tag.
+ switch length {
+ case rdbEncInt8:
+ i, err := d.readUint8()
+ return []byte(strconv.FormatInt(int64(int8(i)), 10)), err
+ case rdbEncInt16:
+ i, err := d.readUint16()
+ return []byte(strconv.FormatInt(int64(int16(i)), 10)), err
+ case rdbEncInt32:
+ i, err := d.readUint32()
+ return []byte(strconv.FormatInt(int64(int32(i)), 10)), err
+ case rdbEncLZF:
+ // LZF payload: compressed length, uncompressed length, then the
+ // compressed bytes themselves.
+ clen, _, err := d.readLength()
+ if err != nil {
+ return nil, err
+ }
+ ulen, _, err := d.readLength()
+ if err != nil {
+ return nil, err
+ }
+ compressed := make([]byte, clen)
+ _, err = io.ReadFull(d.r, compressed)
+ if err != nil {
+ return nil, err
+ }
+ decompressed := lzfDecompress(compressed, int(ulen))
+ if len(decompressed) != int(ulen) {
+ return nil, fmt.Errorf("decompressed string length %d didn't match expected length %d", len(decompressed), ulen)
+ }
+ return decompressed, nil
+ }
+ }
+
+ // NOTE(review): an unknown encoding tag falls through to the raw-read
+ // path below and treats the tag as a byte count — confirm against callers.
+ str := make([]byte, length)
+ _, err = io.ReadFull(d.r, str)
+ return str, err
+}
+
+// readUint8 reads a single byte from the underlying reader.
+func (d *decode) readUint8() (uint8, error) {
+ b, err := d.r.ReadByte()
+ return uint8(b), err
+}
+
+// readUint16 reads a 2-byte little-endian integer into the shared scratch
+// buffer d.intBuf (presumably 8 bytes — readUint64 fills it completely).
+func (d *decode) readUint16() (uint16, error) {
+ _, err := io.ReadFull(d.r, d.intBuf[:2])
+ if err != nil {
+ return 0, err
+ }
+ return binary.LittleEndian.Uint16(d.intBuf), nil
+}
+
+// readUint32 reads a 4-byte little-endian integer.
+func (d *decode) readUint32() (uint32, error) {
+ _, err := io.ReadFull(d.r, d.intBuf[:4])
+ if err != nil {
+ return 0, err
+ }
+ return binary.LittleEndian.Uint32(d.intBuf), nil
+}
+
+// readUint64 reads an 8-byte little-endian integer.
+func (d *decode) readUint64() (uint64, error) {
+ _, err := io.ReadFull(d.r, d.intBuf)
+ if err != nil {
+ return 0, err
+ }
+ return binary.LittleEndian.Uint64(d.intBuf), nil
+}
+
+// readUint32Big reads a 4-byte big-endian integer (used by the 32-bit
+// length encoding, which is stored big-endian unlike the value integers).
+func (d *decode) readUint32Big() (uint32, error) {
+ _, err := io.ReadFull(d.r, d.intBuf[:4])
+ if err != nil {
+ return 0, err
+ }
+ return binary.BigEndian.Uint32(d.intBuf), nil
+}
+
+// readFloat64 reads an RDB-encoded double.
+// Doubles are saved as strings prefixed by an unsigned
+// 8 bit integer specifying the length of the representation.
+// This 8 bit integer has special values in order to specify the following
+// conditions:
+// 253: not a number
+// 254: + inf
+// 255: - inf
+func (d *decode) readFloat64() (float64, error) {
+ length, err := d.readUint8()
+ if err != nil {
+ return 0, err
+ }
+ switch length {
+ case 253:
+ return math.NaN(), nil
+ case 254:
+ return math.Inf(0), nil
+ case 255:
+ return math.Inf(-1), nil
+ default:
+ floatBytes := make([]byte, length)
+ _, err := io.ReadFull(d.r, floatBytes)
+ if err != nil {
+ return 0, err
+ }
+ f, err := strconv.ParseFloat(string(floatBytes), 64)
+ return f, err
+ }
+
+ // Unreachable: every switch arm above returns.
+ panic("not reached")
+}
+
+// readLength reads an RDB length field. It returns the decoded length, a
+// flag that is true when the field is actually an encoding tag (rdbEncVal)
+// rather than a byte count, and any read error.
+func (d *decode) readLength() (uint32, bool, error) {
+ b, err := d.r.ReadByte()
+ if err != nil {
+ return 0, false, err
+ }
+ // The first two bits of the first byte are used to indicate the length encoding type
+ switch (b & 0xc0) >> 6 {
+ case rdb6bitLen:
+ // When the first two bits are 00, the next 6 bits are the length.
+ return uint32(b & 0x3f), false, nil
+ case rdb14bitLen:
+ // When the first two bits are 01, the next 14 bits are the length.
+ bb, err := d.r.ReadByte()
+ if err != nil {
+ return 0, false, err
+ }
+ return (uint32(b&0x3f) << 8) | uint32(bb), false, nil
+ case rdbEncVal:
+ // When the first two bits are 11, the next object is encoded.
+ // The next 6 bits indicate the encoding type.
+ return uint32(b & 0x3f), true, nil
+ default:
+ // When the first two bits are 10, the next 6 bits are discarded.
+ // The next 4 bytes are the length.
+ length, err := d.readUint32Big()
+ return length, false, err
+ }
+
+ // Unreachable: every switch arm above returns.
+ panic("not reached")
+}
+
+// verifyDump validates the 10-byte footer of a DUMP payload: a 2-byte
+// little-endian RDB version followed by an 8-byte CRC64 checksum computed
+// over everything that precedes the checksum itself.
+func verifyDump(d []byte) error {
+ if len(d) < 10 {
+ return fmt.Errorf("rdb: invalid dump length")
+ }
+ version := binary.LittleEndian.Uint16(d[len(d)-10:])
+ if version != uint16(Version) {
+ return fmt.Errorf("rdb: invalid version %d, expecting %d", version, Version)
+ }
+
+ if binary.LittleEndian.Uint64(d[len(d)-8:]) != crc64.Digest(d[:len(d)-8]) {
+ return fmt.Errorf("rdb: invalid CRC checksum")
+ }
+
+ return nil
+}
+
+// lzfDecompress expands an LZF-compressed block into a buffer of exactly
+// outlen bytes. Control bytes < 32 mean "copy the next ctrl+1 literal
+// bytes"; otherwise the byte encodes a back-reference (length in the top 3
+// bits, offset from the low 5 bits plus the following byte).
+//
+// NOTE(review): all slice indexing here is unchecked, so truncated or
+// corrupt input will panic rather than return an error — callers must be
+// prepared for that (or recover).
+func lzfDecompress(in []byte, outlen int) []byte {
+ out := make([]byte, outlen)
+ for i, o := 0, 0; i < len(in); {
+ ctrl := int(in[i])
+ i++
+ if ctrl < 32 {
+ // Literal run of ctrl+1 bytes.
+ for x := 0; x <= ctrl; x++ {
+ out[o] = in[i]
+ i++
+ o++
+ }
+ } else {
+ length := ctrl >> 5
+ if length == 7 {
+ // Length 7 is an escape: the real length adds the next byte.
+ length = length + int(in[i])
+ i++
+ }
+ // Back-reference into already-produced output; copies length+2
+ // bytes and may legitimately overlap itself.
+ ref := o - ((ctrl & 0x1f) << 8) - int(in[i]) - 1
+ i++
+ for x := 0; x <= length+1; x++ {
+ out[o] = out[ref]
+ ref++
+ o++
+ }
+ }
+ }
+ return out
+}
diff --git a/vendor/github.com/cupcake/rdb/encoder.go b/vendor/github.com/cupcake/rdb/encoder.go
new file mode 100644
index 0000000..7902a7d
--- /dev/null
+++ b/vendor/github.com/cupcake/rdb/encoder.go
@@ -0,0 +1,130 @@
+package rdb
+
+import (
+ "encoding/binary"
+ "fmt"
+ "hash"
+ "io"
+ "math"
+ "strconv"
+
+ "github.com/cupcake/rdb/crc64"
+)
+
+// Version is the RDB format version this encoder emits.
+const Version = 6
+
+// Encoder writes RDB-format data to w while maintaining a running CRC64
+// of everything written (used by the footers).
+type Encoder struct {
+ w io.Writer
+ crc hash.Hash
+}
+
+// NewEncoder returns an Encoder that tees all output into a CRC64 digest.
+func NewEncoder(w io.Writer) *Encoder {
+ e := &Encoder{crc: crc64.New()}
+ e.w = io.MultiWriter(w, e.crc)
+ return e
+}
+
+// EncodeHeader writes the RDB file preamble ("REDIS" + 4-digit version).
+func (e *Encoder) EncodeHeader() error {
+ _, err := fmt.Fprintf(e.w, "REDIS%04d", Version)
+ return err
+}
+
+// EncodeFooter writes the EOF flag followed by the accumulated CRC64.
+// NOTE(review): the error from the first Write is discarded; only the
+// checksum write's error is reported.
+func (e *Encoder) EncodeFooter() error {
+ e.w.Write([]byte{rdbFlagEOF})
+ _, err := e.w.Write(e.crc.Sum(nil))
+ return err
+}
+
+// EncodeDumpFooter writes the DUMP-payload footer: 2-byte version then
+// CRC64 (the layout verifyDump checks). The binary.Write error is discarded.
+func (e *Encoder) EncodeDumpFooter() error {
+ binary.Write(e.w, binary.LittleEndian, uint16(Version))
+ _, err := e.w.Write(e.crc.Sum(nil))
+ return err
+}
+
+// EncodeDatabase writes a SELECTDB opcode followed by the database number.
+func (e *Encoder) EncodeDatabase(n int) error {
+ e.w.Write([]byte{rdbFlagSelectDB})
+ return e.EncodeLength(uint32(n))
+}
+
+// EncodeExpiry writes a millisecond-precision expiry opcode and timestamp.
+func (e *Encoder) EncodeExpiry(expiry uint64) error {
+ b := make([]byte, 9)
+ b[0] = rdbFlagExpiryMS
+ binary.LittleEndian.PutUint64(b[1:], expiry)
+ _, err := e.w.Write(b)
+ return err
+}
+
+// EncodeType writes a single value-type opcode byte.
+func (e *Encoder) EncodeType(v ValueType) error {
+ _, err := e.w.Write([]byte{byte(v)})
+ return err
+}
+
+// EncodeString writes s, using the compact integer encoding when s is the
+// canonical decimal form of a 32-bit integer, and a length-prefixed raw
+// string otherwise. NOTE(review): EncodeLength's error is discarded.
+func (e *Encoder) EncodeString(s []byte) error {
+ written, err := e.encodeIntString(s)
+ if written {
+ return err
+ }
+ e.EncodeLength(uint32(len(s)))
+ _, err = e.w.Write(s)
+ return err
+}
+
+// EncodeLength writes l using the shortest RDB length encoding: 6-bit
+// (one byte), 14-bit (two bytes), or 32-bit big-endian (five bytes).
+func (e *Encoder) EncodeLength(l uint32) (err error) {
+ switch {
+ case l < 1<<6:
+ _, err = e.w.Write([]byte{byte(l)})
+ case l < 1<<14:
+ _, err = e.w.Write([]byte{byte(l>>8) | rdb14bitLen<<6, byte(l)})
+ default:
+ b := make([]byte, 5)
+ b[0] = rdb32bitLen << 6
+ binary.BigEndian.PutUint32(b[1:], l)
+ _, err = e.w.Write(b)
+ }
+ return
+}
+
+// EncodeFloat writes f in the RDB double encoding: a one-byte length (with
+// 253/254/255 reserved for NaN/+Inf/-Inf) followed by the decimal text.
+// NOTE(review): the length-byte Write error is discarded in the default arm.
+func (e *Encoder) EncodeFloat(f float64) (err error) {
+ switch {
+ case math.IsNaN(f):
+ _, err = e.w.Write([]byte{253})
+ case math.IsInf(f, 1):
+ _, err = e.w.Write([]byte{254})
+ case math.IsInf(f, -1):
+ _, err = e.w.Write([]byte{255})
+ default:
+ b := []byte(strconv.FormatFloat(f, 'g', 17, 64))
+ e.w.Write([]byte{byte(len(b))})
+ _, err = e.w.Write(b)
+ }
+ return
+}
+
+// encodeIntString writes b using the compact integer string encoding when b
+// is exactly the canonical decimal form of an int that fits in 8, 16 or 32
+// bits. It reports whether anything was written; when written is false the
+// caller must emit b as a raw string instead.
+func (e *Encoder) encodeIntString(b []byte) (written bool, err error) {
+ s := string(b)
+ i, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return
+ }
+ // if the stringified parsed int isn't exactly the same, we can't encode it as an int
+ if s != strconv.FormatInt(i, 10) {
+ return
+ }
+ switch {
+ case i >= math.MinInt8 && i <= math.MaxInt8:
+ _, err = e.w.Write([]byte{rdbEncVal << 6, byte(int8(i))})
+ case i >= math.MinInt16 && i <= math.MaxInt16:
+ b := make([]byte, 3)
+ b[0] = rdbEncVal<<6 | rdbEncInt16
+ binary.LittleEndian.PutUint16(b[1:], uint16(int16(i)))
+ _, err = e.w.Write(b)
+ case i >= math.MinInt32 && i <= math.MaxInt32:
+ b := make([]byte, 5)
+ b[0] = rdbEncVal<<6 | rdbEncInt32
+ binary.LittleEndian.PutUint32(b[1:], uint32(int32(i)))
+ _, err = e.w.Write(b)
+ default:
+ return
+ }
+ return true, err
+}
diff --git a/vendor/github.com/cupcake/rdb/nopdecoder/nop_decoder.go b/vendor/github.com/cupcake/rdb/nopdecoder/nop_decoder.go
new file mode 100644
index 0000000..de93a69
--- /dev/null
+++ b/vendor/github.com/cupcake/rdb/nopdecoder/nop_decoder.go
@@ -0,0 +1,24 @@
+package nopdecoder
+
+// NopDecoder may be embedded in a real Decoder to avoid implementing methods.
+// Every callback below is a deliberate no-op so embedders only override the
+// events they care about.
+type NopDecoder struct{}
+
+func (d NopDecoder) StartRDB() {}
+func (d NopDecoder) StartDatabase(n int) {}
+func (d NopDecoder) Aux(key, value []byte) {}
+func (d NopDecoder) ResizeDatabase(dbSize, expiresSize uint32) {}
+func (d NopDecoder) EndDatabase(n int) {}
+func (d NopDecoder) EndRDB() {}
+func (d NopDecoder) Set(key, value []byte, expiry int64) {}
+func (d NopDecoder) StartHash(key []byte, length, expiry int64) {}
+func (d NopDecoder) Hset(key, field, value []byte) {}
+func (d NopDecoder) EndHash(key []byte) {}
+func (d NopDecoder) StartSet(key []byte, cardinality, expiry int64) {}
+func (d NopDecoder) Sadd(key, member []byte) {}
+func (d NopDecoder) EndSet(key []byte) {}
+func (d NopDecoder) StartList(key []byte, length, expiry int64) {}
+func (d NopDecoder) Rpush(key, value []byte) {}
+func (d NopDecoder) EndList(key []byte) {}
+func (d NopDecoder) StartZSet(key []byte, cardinality, expiry int64) {}
+func (d NopDecoder) Zadd(key []byte, score float64, member []byte) {}
+func (d NopDecoder) EndZSet(key []byte) {}
diff --git a/vendor/github.com/cupcake/rdb/slice_buffer.go b/vendor/github.com/cupcake/rdb/slice_buffer.go
new file mode 100644
index 0000000..b3e12a0
--- /dev/null
+++ b/vendor/github.com/cupcake/rdb/slice_buffer.go
@@ -0,0 +1,67 @@
+package rdb
+
+import (
+ "errors"
+ "io"
+)
+
+// sliceBuffer is a minimal read-only cursor over a byte slice, implementing
+// just enough of io.Reader/io.ByteReader/io.Seeker for the RDB decoder.
+type sliceBuffer struct {
+ s []byte // underlying data, never copied
+ i int // current read offset into s
+}
+
+func newSliceBuffer(s []byte) *sliceBuffer {
+ return &sliceBuffer{s, 0}
+}
+
+// Slice returns the next n bytes without copying and advances the cursor.
+// The returned slice aliases the underlying data.
+func (s *sliceBuffer) Slice(n int) ([]byte, error) {
+ if s.i+n > len(s.s) {
+ return nil, io.EOF
+ }
+ b := s.s[s.i : s.i+n]
+ s.i += n
+ return b, nil
+}
+
+// ReadByte returns the next byte, or io.EOF when exhausted.
+func (s *sliceBuffer) ReadByte() (byte, error) {
+ if s.i >= len(s.s) {
+ return 0, io.EOF
+ }
+ b := s.s[s.i]
+ s.i++
+ return b, nil
+}
+
+// Read copies as many remaining bytes as fit into b (io.Reader contract).
+func (s *sliceBuffer) Read(b []byte) (int, error) {
+ if len(b) == 0 {
+ return 0, nil
+ }
+ if s.i >= len(s.s) {
+ return 0, io.EOF
+ }
+ n := copy(b, s.s[s.i:])
+ s.i += n
+ return n, nil
+}
+
+// Seek repositions the cursor (whence: 0=start, 1=current, 2=end). Seeking
+// past the end is permitted; subsequent reads will return io.EOF.
+func (s *sliceBuffer) Seek(offset int64, whence int) (int64, error) {
+ var abs int64
+ switch whence {
+ case 0:
+ abs = offset
+ case 1:
+ abs = int64(s.i) + offset
+ case 2:
+ abs = int64(len(s.s)) + offset
+ default:
+ return 0, errors.New("invalid whence")
+ }
+ if abs < 0 {
+ return 0, errors.New("negative position")
+ }
+ if abs >= 1<<31 {
+ return 0, errors.New("position out of range")
+ }
+ s.i = int(abs)
+ return abs, nil
+}
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 0000000..bc52e96
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 0000000..7929947
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,145 @@
+// Copyright (c) 2015-2016 Dave Collins
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which make the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
+
+package spew
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = false
+
+ // ptrSize is the size of a pointer on the current arch.
+ ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+// flag mirrors the unexported reflect.Value flag field's type.
+type flag uintptr
+
+var (
+ // flagRO indicates whether the value field of a reflect.Value
+ // is read-only. Inferred at init time, not hard-coded.
+ flagRO flag
+
+ // flagAddr indicates whether the address of the reflect.Value's
+ // value may be taken. Also inferred at init time.
+ flagAddr flag
+)
+
+// flagKindMask holds the bits that make up the kind
+// part of the flags field. In all the supported versions,
+// it is in the lower 5 bits.
+const flagKindMask = flag(0x1f)
+
+// Different versions of Go have used different
+// bit layouts for the flags type. This table
+// records the known combinations; init() verifies the inferred flags
+// match one of them and panics otherwise.
+var okFlags = []struct {
+ ro, addr flag
+}{{
+ // From Go 1.4 to 1.5
+ ro: 1 << 5,
+ addr: 1 << 7,
+}, {
+ // Up to Go tip.
+ ro: 1<<5 | 1<<6,
+ addr: 1 << 8,
+}}
+
+// flagValOffset is the byte offset of the unexported "flag" field inside
+// reflect.Value, discovered via reflection at package init.
+var flagValOffset = func() uintptr {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ return field.Offset
+}()
+
+// flagField returns a pointer to the flag field of a reflect.Value.
+func flagField(v *reflect.Value) *flag {
+ return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
+}
+
+// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ // Already usable (or invalid) values are returned untouched.
+ if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
+ return v
+ }
+ // Clear the read-only bits and set the addressable bit in place.
+ flagFieldPtr := flagField(&v)
+ *flagFieldPtr &^= flagRO
+ *flagFieldPtr |= flagAddr
+ return v
+}
+
+// Sanity checks against future reflect package changes
+// to the type or semantics of the Value.flag field. It also infers the
+// actual flagRO/flagAddr bit values for the running Go version by
+// comparing flags of specially-constructed values, and panics if the
+// result matches no known layout in okFlags.
+func init() {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
+ panic("reflect.Value flag field has changed kind")
+ }
+ type t0 int
+ var t struct {
+ A t0
+ // t0 will have flagEmbedRO set.
+ t0
+ // a will have flagStickyRO set
+ a t0
+ }
+ vA := reflect.ValueOf(t).FieldByName("A")
+ va := reflect.ValueOf(t).FieldByName("a")
+ vt0 := reflect.ValueOf(t).FieldByName("t0")
+
+ // Infer flagRO from the difference between the flags
+ // for the (otherwise identical) fields in t.
+ flagPublic := *flagField(&vA)
+ flagWithRO := *flagField(&va) | *flagField(&vt0)
+ flagRO = flagPublic ^ flagWithRO
+
+ // Infer flagAddr from the difference between a value
+ // taken from a pointer and not.
+ vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
+ flagNoPtr := *flagField(&vA)
+ flagPtr := *flagField(&vPtrA)
+ flagAddr = flagNoPtr ^ flagPtr
+
+ // Check that the inferred flags tally with one of the known versions.
+ for _, f := range okFlags {
+ if flagRO == f.ro && flagAddr == f.addr {
+ return
+ }
+ }
+ panic("reflect.Value read-only flag has changed semantics")
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 0000000..205c28d
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe !go1.4
+
+package spew
+
+import "reflect"
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available. True in this build
+ // variant (safe/appengine/js), selected by the file's build tags.
+ UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into a one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data. However, doing this relies on access to
+// the unsafe package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 0000000..1be8ce9
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+// NOTE: the angle-bracketed literals below (<nil>, <max depth reached>,
+// <max>, <shown>, <invalid>) had been stripped to empty strings in this
+// copy — apparently by HTML sanitization of the source. They are restored
+// here to match upstream go-spew, whose output depends on them.
+var (
+ panicBytes = []byte("(PANIC=")
+ plusBytes = []byte("+")
+ iBytes = []byte("i")
+ trueBytes = []byte("true")
+ falseBytes = []byte("false")
+ interfaceBytes = []byte("(interface {})")
+ commaNewlineBytes = []byte(",\n")
+ newlineBytes = []byte("\n")
+ openBraceBytes = []byte("{")
+ openBraceNewlineBytes = []byte("{\n")
+ closeBraceBytes = []byte("}")
+ asteriskBytes = []byte("*")
+ colonBytes = []byte(":")
+ colonSpaceBytes = []byte(": ")
+ openParenBytes = []byte("(")
+ closeParenBytes = []byte(")")
+ spaceBytes = []byte(" ")
+ pointerChainBytes = []byte("->")
+ nilAngleBytes = []byte("<nil>")
+ maxNewlineBytes = []byte("<max depth reached>\n")
+ maxShortBytes = []byte("<max>")
+ circularBytes = []byte("<shown>")
+ circularShortBytes = []byte("<shown>")
+ invalidAngleBytes = []byte("<invalid>")
+ openBracketBytes = []byte("[")
+ closeBracketBytes = []byte("]")
+ percentBytes = []byte("%")
+ precisionBytes = []byte(".")
+ openAngleBytes = []byte("<")
+ closeAngleBytes = []byte(">")
+ openMapBytes = []byte("map[")
+ closeMapBytes = []byte("]")
+ lenEqualsBytes = []byte("len=")
+ capEqualsBytes = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls. It is installed via defer, so recover() is effective here; the
+// panic value is rendered as "(PANIC=<value>)" into w.
+func catchPanic(w io.Writer, v reflect.Value) {
+ if err := recover(); err != nil {
+ w.Write(panicBytes)
+ fmt.Fprintf(w, "%v", err)
+ w.Write(closeParenBytes)
+ }
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value. Returns true when the value was fully handled and
+// the caller should not format it further.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+ // We need an interface to check if the type implements the error or
+ // Stringer interface. However, the reflect package won't give us an
+ // interface on certain things like unexported struct fields in order
+ // to enforce visibility rules. We use unsafe, when it's available,
+ // to bypass these restrictions since this package does not mutate the
+ // values.
+ if !v.CanInterface() {
+ if UnsafeDisabled {
+ return false
+ }
+
+ v = unsafeReflectValue(v)
+ }
+
+ // Choose whether or not to do error and Stringer interface lookups against
+ // the base type or a pointer to the base type depending on settings.
+ // Technically calling one of these methods with a pointer receiver can
+ // mutate the value, however, types which choose to satisfy an error or
+ // Stringer interface with a pointer receiver should not be mutating their
+ // state inside these interface methods.
+ if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+ v = unsafeReflectValue(v)
+ }
+ if v.CanAddr() {
+ v = v.Addr()
+ }
+
+ // Is it an error or Stringer?
+ switch iface := v.Interface().(type) {
+ case error:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ // Emit "(message) " but report unhandled so normal formatting
+ // continues after the method output.
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.Error()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+
+ w.Write([]byte(iface.Error()))
+ return true
+
+ case fmt.Stringer:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.String()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+ w.Write([]byte(iface.String()))
+ return true
+ }
+ return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+ if val {
+ w.Write(trueBytes)
+ } else {
+ w.Write(falseBytes)
+ }
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+ w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+ w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+ w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w, as "(re+imi)".
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+ r := real(c)
+ w.Write(openParenBytes)
+ w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+ i := imag(c)
+ if i >= 0 {
+ // FormatFloat emits the sign only for negatives; add "+" explicitly.
+ w.Write(plusBytes)
+ }
+ w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+ w.Write(iBytes)
+ w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w. A zero pointer is rendered via nilAngleBytes instead.
+func printHexPtr(w io.Writer, p uintptr) {
+ // Null pointer.
+ num := uint64(p)
+ if num == 0 {
+ w.Write(nilAngleBytes)
+ return
+ }
+
+ // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+ buf := make([]byte, 18)
+
+ // It's simpler to construct the hex string right to left.
+ base := uint64(16)
+ i := len(buf) - 1
+ for num >= base {
+ buf[i] = hexDigits[num%base]
+ num /= base
+ i--
+ }
+ buf[i] = hexDigits[num]
+
+ // Add '0x' prefix.
+ i--
+ buf[i] = 'x'
+ i--
+ buf[i] = '0'
+
+ // Strip unused leading bytes.
+ buf = buf[i:]
+ w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+ values []reflect.Value
+ strings []string // either nil or same len as values
+ cs *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted. It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+ vs := &valuesSorter{values: values, cs: cs}
+ if canSortSimply(vs.values[0].Kind()) {
+ return vs
+ }
+ if !cs.DisableMethods {
+ // Try error/Stringer output as the sort key; abandon the whole
+ // surrogate array if any element lacks such a method.
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ b := bytes.Buffer{}
+ if !handleMethods(cs, &b, vs.values[i]) {
+ vs.strings = nil
+ break
+ }
+ vs.strings[i] = b.String()
+ }
+ }
+ if vs.strings == nil && cs.SpewKeys {
+ // Fall back to spew's own formatting as the surrogate key.
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+ }
+ }
+ return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+ // This switch parallels valueSortLess, except for the default case.
+ switch kind {
+ case reflect.Bool:
+ return true
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return true
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return true
+ case reflect.Float32, reflect.Float64:
+ return true
+ case reflect.String:
+ return true
+ case reflect.Uintptr:
+ return true
+ case reflect.Array:
+ return true
+ }
+ return false
+}
+
+// Len returns the number of values in the slice. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+ return len(s.values)
+}
+
+// Swap swaps the values at the passed indices. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+ s.values[i], s.values[j] = s.values[j], s.values[i]
+ if s.strings != nil {
+ s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+ }
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value. It is used by valuesSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return a.Int() < b.Int()
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return a.Uint() < b.Uint()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.String:
+ return a.String() < b.String()
+ case reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Array:
+ // Compare the contents of both arrays.
+ l := a.Len()
+ for i := 0; i < l; i++ {
+ av := a.Index(i)
+ bv := b.Index(i)
+ if av.Interface() == bv.Interface() {
+ continue
+ }
+ return valueSortLess(av, bv)
+ }
+ }
+ // Fallback for kinds not handled above (and equal arrays): compare the
+ // reflect.Value string representations for display stability.
+ return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j. It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+ if s.strings == nil {
+ return valueSortLess(s.values[i], s.values[j])
+ }
+ return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer. Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+ if len(values) == 0 {
+ return
+ }
+ sort.Sort(newValuesSorter(values, cs))
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 0000000..2e3d22f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+ // Indent specifies the string to use for each indentation level. The
+ // global config instance that all top-level functions use set this to a
+ // single space by default. If you would like more indentation, you might
+ // set this to a tab with "\t" or perhaps two spaces with " ".
+ Indent string
+
+ // MaxDepth controls the maximum number of levels to descend into nested
+ // data structures. The default, 0, means there is no limit.
+ //
+ // NOTE: Circular data structures are properly detected, so it is not
+ // necessary to set this value unless you specifically want to limit deeply
+ // nested data structures.
+ MaxDepth int
+
+ // DisableMethods specifies whether or not error and Stringer interfaces are
+ // invoked for types that implement them.
+ DisableMethods bool
+
+ // DisablePointerMethods specifies whether or not to check for and invoke
+ // error and Stringer interfaces on types which only accept a pointer
+ // receiver when the current type is not a pointer.
+ //
+ // NOTE: This might be an unsafe action since calling one of these methods
+ // with a pointer receiver could technically mutate the value, however,
+	// in practice, types which choose to satisfy an error or Stringer
+ // interface with a pointer receiver should not be mutating their state
+ // inside these interface methods. As a result, this option relies on
+ // access to the unsafe package, so it will not have any effect when
+ // running in environments without access to the unsafe package such as
+ // Google App Engine or with the "safe" build tag specified.
+ DisablePointerMethods bool
+
+ // DisablePointerAddresses specifies whether to disable the printing of
+ // pointer addresses. This is useful when diffing data structures in tests.
+ DisablePointerAddresses bool
+
+ // DisableCapacities specifies whether to disable the printing of capacities
+ // for arrays, slices, maps and channels. This is useful when diffing
+ // data structures in tests.
+ DisableCapacities bool
+
+ // ContinueOnMethod specifies whether or not recursion should continue once
+ // a custom error or Stringer interface is invoked. The default, false,
+ // means it will print the results of invoking the custom error or Stringer
+ // interface and return immediately instead of continuing to recurse into
+ // the internals of the data type.
+ //
+ // NOTE: This flag does not have any effect if method invocation is disabled
+ // via the DisableMethods or DisablePointerMethods options.
+ ContinueOnMethod bool
+
+ // SortKeys specifies map keys should be sorted before being printed. Use
+ // this to have a more deterministic, diffable output. Note that only
+ // native types (bool, int, uint, floats, uintptr and string) and types
+ // that support the error or Stringer interfaces (if methods are
+ // enabled) are supported, with other types sorted according to the
+ // reflect.Value.String() output which guarantees display stability.
+ SortKeys bool
+
+ // SpewKeys specifies that, as a last resort attempt, map keys should
+ // be spewed to strings and sorted by those strings. This is only
+ // considered if SortKeys is true.
+ SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// passed with a Formatter interface returned by c.NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+ return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Print.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+ fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+ fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(c, &buf, a...)
+ return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with s.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = newFormatter(c, arg)
+ }
+ return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// Indent: " "
+// MaxDepth: 0
+// DisableMethods: false
+// DisablePointerMethods: false
+// ContinueOnMethod: false
+// SortKeys: false
+func NewDefaultConfig() *ConfigState {
+ return &ConfigState{Indent: " "}
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 0000000..aacaac6
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types are as follows:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output (only when using
+ Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+ * Dump style which prints with newlines, customizable indentation,
+ and additional debug information such as types and all pointer addresses
+ used to indirect to the final value
+ * A custom Formatter interface that integrates cleanly with the standard fmt
+ package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+ similar to the default %v while providing the additional functionality
+ outlined above and passing unsupported format verbs such as %x and %q
+ along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+ spew.Dump(myVar1, myVar2, ...)
+ spew.Fdump(someWriter, myVar1, myVar2, ...)
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+ * Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+ * MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+ * DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+ * DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables.
+ Pointer method invocation is enabled by default.
+
+ * DisablePointerAddresses
+ DisablePointerAddresses specifies whether to disable the printing of
+ pointer addresses. This is useful when diffing data structures in tests.
+
+ * DisableCapacities
+ DisableCapacities specifies whether to disable the printing of
+ capacities for arrays, slices, maps and channels. This is useful when
+ diffing data structures in tests.
+
+ * ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+ * SortKeys
+ Specifies map keys should be sorted before being printed. Use
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are
+ supported with other types sorted according to the
+ reflect.Value.String() output which guarantees display
+ stability. Natural map order is used by default.
+
+ * SpewKeys
+ Specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only
+ considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+ spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+ spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+ (main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+ data: (uintptr)
+ }),
+ ExportedField: (map[interface {}]interface {}) (len=1) {
+ (string) (len=3) "one": (bool) true
+ }
+ }
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+ ([]uint8) (len=32 cap=32) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+ }
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Print. The
+functions have syntax you are most likely already familiar with:
+
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Println(myVar, myVar2)
+ spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+ %v: <*>{1 <*>}
+ %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)}
+ %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)}
+ %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 0000000..f78d89f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ // uint8Type is a reflect.Type representing a uint8. It is used to
+ // convert cgo types to uint8 slices for hexdumping.
+ uint8Type = reflect.TypeOf(uint8(0))
+
+ // cCharRE is a regular expression that matches a cgo char.
+ // It is used to detect character arrays to hexdump them.
+ cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
+
+ // cUnsignedCharRE is a regular expression that matches a cgo unsigned
+ // char. It is used to detect unsigned character arrays to hexdump
+ // them.
+ cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
+
+ // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+ // It is used to detect uint8_t arrays to hexdump them.
+ cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+ w io.Writer
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ ignoreNextIndent bool
+ cs *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+ if d.ignoreNextIndent {
+ d.ignoreNextIndent = false
+ return
+ }
+ d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range d.pointers {
+ if depth >= d.depth {
+ delete(d.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ d.pointers[addr] = d.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type information.
+ d.w.Write(openParenBytes)
+ d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+ d.w.Write([]byte(ve.Type().String()))
+ d.w.Write(closeParenBytes)
+
+ // Display pointer information.
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+ d.w.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ d.w.Write(pointerChainBytes)
+ }
+ printHexPtr(d.w, addr)
+ }
+ d.w.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ d.w.Write(openParenBytes)
+ switch {
+ case nilFound:
+ d.w.Write(nilAngleBytes)
+
+ case cycleFound:
+ d.w.Write(circularBytes)
+
+ default:
+ d.ignoreNextType = true
+ d.dump(ve)
+ }
+ d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+ // Determine whether this type should be hex dumped or not. Also,
+ // for types which should be hexdumped, try to use the underlying data
+ // first, then fall back to trying to convert them to a uint8 slice.
+ var buf []uint8
+ doConvert := false
+ doHexDump := false
+ numEntries := v.Len()
+ if numEntries > 0 {
+ vt := v.Index(0).Type()
+ vts := vt.String()
+ switch {
+ // C types that need to be converted.
+ case cCharRE.MatchString(vts):
+ fallthrough
+ case cUnsignedCharRE.MatchString(vts):
+ fallthrough
+ case cUint8tCharRE.MatchString(vts):
+ doConvert = true
+
+ // Try to use existing uint8 slices and fall back to converting
+ // and copying if that fails.
+ case vt.Kind() == reflect.Uint8:
+ // We need an addressable interface to convert the type
+ // to a byte slice. However, the reflect package won't
+ // give us an interface on certain things like
+ // unexported struct fields in order to enforce
+ // visibility rules. We use unsafe, when available, to
+ // bypass these restrictions since this package does not
+ // mutate the values.
+ vs := v
+ if !vs.CanInterface() || !vs.CanAddr() {
+ vs = unsafeReflectValue(vs)
+ }
+ if !UnsafeDisabled {
+ vs = vs.Slice(0, numEntries)
+
+ // Use the existing uint8 slice if it can be
+ // type asserted.
+ iface := vs.Interface()
+ if slice, ok := iface.([]uint8); ok {
+ buf = slice
+ doHexDump = true
+ break
+ }
+ }
+
+ // The underlying data needs to be converted if it can't
+ // be type asserted to a uint8 slice.
+ doConvert = true
+ }
+
+ // Copy and convert the underlying type if needed.
+ if doConvert && vt.ConvertibleTo(uint8Type) {
+ // Convert and copy each element into a uint8 byte
+ // slice.
+ buf = make([]uint8, numEntries)
+ for i := 0; i < numEntries; i++ {
+ vv := v.Index(i)
+ buf[i] = uint8(vv.Convert(uint8Type).Uint())
+ }
+ doHexDump = true
+ }
+ }
+
+ // Hexdump the entire slice as needed.
+ if doHexDump {
+ indent := strings.Repeat(d.cs.Indent, d.depth)
+ str := indent + hex.Dump(buf)
+ str = strings.Replace(str, "\n", "\n"+indent, -1)
+ str = strings.TrimRight(str, d.cs.Indent)
+ d.w.Write([]byte(str))
+ return
+ }
+
+ // Recursively call dump for each item.
+ for i := 0; i < numEntries; i++ {
+ d.dump(d.unpackValue(v.Index(i)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+}
+
+// dump is the main workhorse for dumping a value. It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately. It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ d.w.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ d.indent()
+ d.dumpPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !d.ignoreNextType {
+ d.indent()
+ d.w.Write(openParenBytes)
+ d.w.Write([]byte(v.Type().String()))
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+ d.ignoreNextType = false
+
+ // Display length and capacity if the built-in len and cap functions
+ // work with the value's kind and the len/cap itself is non-zero.
+ valueLen, valueCap := 0, 0
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ valueLen, valueCap = v.Len(), v.Cap()
+ case reflect.Map, reflect.String:
+ valueLen = v.Len()
+ }
+ if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+ d.w.Write(openParenBytes)
+ if valueLen != 0 {
+ d.w.Write(lenEqualsBytes)
+ printInt(d.w, int64(valueLen), 10)
+ }
+ if !d.cs.DisableCapacities && valueCap != 0 {
+ if valueLen != 0 {
+ d.w.Write(spaceBytes)
+ }
+ d.w.Write(capEqualsBytes)
+ printInt(d.w, int64(valueCap), 10)
+ }
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+
+ // Call Stringer/error interfaces if they exist and the handle methods flag
+ // is enabled
+ if !d.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(d.cs, d.w, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(d.w, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(d.w, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(d.w, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(d.w, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(d.w, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(d.w, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(d.w, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ d.dumpSlice(v)
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.String:
+ d.w.Write([]byte(strconv.Quote(v.String())))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ numEntries := v.Len()
+ keys := v.MapKeys()
+ if d.cs.SortKeys {
+ sortValues(keys, d.cs)
+ }
+ for i, key := range keys {
+ d.dump(d.unpackValue(key))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.MapIndex(key)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Struct:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ vt := v.Type()
+ numFields := v.NumField()
+ for i := 0; i < numFields; i++ {
+ d.indent()
+ vtf := vt.Field(i)
+ d.w.Write([]byte(vtf.Name))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.Field(i)))
+ if i < (numFields - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(d.w, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(d.w, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it in case any new
+ // types are added.
+ default:
+ if v.CanInterface() {
+ fmt.Fprintf(d.w, "%v", v.Interface())
+ } else {
+ fmt.Fprintf(d.w, "%v", v.String())
+ }
+ }
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+ for _, arg := range a {
+ if arg == nil {
+ w.Write(interfaceBytes)
+ w.Write(spaceBytes)
+ w.Write(nilAngleBytes)
+ w.Write(newlineBytes)
+ continue
+ }
+
+ d := dumpState{w: w, cs: cs}
+ d.pointers = make(map[uintptr]int)
+ d.dump(reflect.ValueOf(arg))
+ d.w.Write(newlineBytes)
+ }
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+ fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(&Config, &buf, a...)
+ return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+ fdump(&Config, os.Stdout, a...)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 0000000..b04edb7
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// supportedFlags is a list of all the character flags supported by fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation. The NewFormatter function can
+// be used to get a new Formatter which can be used directly as arguments
+// in standard fmt package printing calls.
+type formatState struct {
+ value interface{}
+ fs fmt.State
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ cs *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type. Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ buf.WriteRune('v')
+
+ format = buf.String()
+ return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ if width, ok := f.fs.Width(); ok {
+ buf.WriteString(strconv.Itoa(width))
+ }
+
+ if precision, ok := f.fs.Precision(); ok {
+ buf.Write(precisionBytes)
+ buf.WriteString(strconv.Itoa(precision))
+ }
+
+ buf.WriteRune(verb)
+
+ format = buf.String()
+ return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface {
+ f.ignoreNextType = false
+ if !v.IsNil() {
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+ // Display nil if top level pointer is nil.
+ showTypes := f.fs.Flag('#')
+ if v.IsNil() && (!showTypes || f.ignoreNextType) {
+ f.fs.Write(nilAngleBytes)
+ return
+ }
+
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range f.pointers {
+ if depth >= f.depth {
+ delete(f.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to possibly show later.
+ pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ f.pointers[addr] = f.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type or indirection level depending on flags.
+ if showTypes && !f.ignoreNextType {
+ f.fs.Write(openParenBytes)
+ f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+ f.fs.Write([]byte(ve.Type().String()))
+ f.fs.Write(closeParenBytes)
+ } else {
+ if nilFound || cycleFound {
+ indirects += strings.Count(ve.Type().String(), "*")
+ }
+ f.fs.Write(openAngleBytes)
+ f.fs.Write([]byte(strings.Repeat("*", indirects)))
+ f.fs.Write(closeAngleBytes)
+ }
+
+ // Display pointer information depending on flags.
+ if f.fs.Flag('+') && (len(pointerChain) > 0) {
+ f.fs.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ f.fs.Write(pointerChainBytes)
+ }
+ printHexPtr(f.fs, addr)
+ }
+ f.fs.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ switch {
+ case nilFound:
+ f.fs.Write(nilAngleBytes)
+
+ case cycleFound:
+ f.fs.Write(circularShortBytes)
+
+ default:
+ f.ignoreNextType = true
+ f.format(ve)
+ }
+}
+
+// format is the main workhorse for providing the Formatter interface. It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately. It is a recursive function,
+// however circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ f.fs.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ f.formatPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !f.ignoreNextType && f.fs.Flag('#') {
+ f.fs.Write(openParenBytes)
+ f.fs.Write([]byte(v.Type().String()))
+ f.fs.Write(closeParenBytes)
+ }
+ f.ignoreNextType = false
+
+ // Call Stringer/error interfaces if they exist and the handle methods
+ // flag is enabled.
+ if !f.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(f.cs, f.fs, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(f.fs, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(f.fs, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(f.fs, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(f.fs, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(f.fs, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(f.fs, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(f.fs, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ f.fs.Write(openBracketBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ numEntries := v.Len()
+ for i := 0; i < numEntries; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.Index(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBracketBytes)
+
+ case reflect.String:
+ f.fs.Write([]byte(v.String()))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+
+ f.fs.Write(openMapBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ keys := v.MapKeys()
+ if f.cs.SortKeys {
+ sortValues(keys, f.cs)
+ }
+ for i, key := range keys {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(key))
+ f.fs.Write(colonBytes)
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.MapIndex(key)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeMapBytes)
+
+ case reflect.Struct:
+ numFields := v.NumField()
+ f.fs.Write(openBraceBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ vt := v.Type()
+ for i := 0; i < numFields; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ vtf := vt.Field(i)
+ if f.fs.Flag('+') || f.fs.Flag('#') {
+ f.fs.Write([]byte(vtf.Name))
+ f.fs.Write(colonBytes)
+ }
+ f.format(f.unpackValue(v.Field(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(f.fs, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(f.fs, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it if any get added.
+ default:
+ format := f.buildDefaultFormat()
+ if v.CanInterface() {
+ fmt.Fprintf(f.fs, format, v.Interface())
+ } else {
+ fmt.Fprintf(f.fs, format, v.String())
+ }
+ }
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+ f.fs = fs
+
+ // Use standard formatting for verbs that are not v.
+ if verb != 'v' {
+ format := f.constructOrigFormat(verb)
+ fmt.Fprintf(fs, format, f.value)
+ return
+ }
+
+ if f.value == nil {
+ if fs.Flag('#') {
+ fs.Write(interfaceBytes)
+ }
+ fs.Write(nilAngleBytes)
+ return
+ }
+
+ f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+ fs := &formatState{value: v, cs: cs}
+ fs.pointers = make(map[uintptr]int)
+ return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(&Config, v)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 0000000..32c0e33
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "fmt"
+ "io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// passed with a default Formatter interface returned by NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+ return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = NewFormatter(arg)
+ }
+ return formatters
+}
diff --git a/vendor/github.com/edsrzf/mmap-go/.gitignore b/vendor/github.com/edsrzf/mmap-go/.gitignore
new file mode 100644
index 0000000..9aa02c1
--- /dev/null
+++ b/vendor/github.com/edsrzf/mmap-go/.gitignore
@@ -0,0 +1,8 @@
+*.out
+*.5
+*.6
+*.8
+*.swp
+_obj
+_test
+testdata
diff --git a/vendor/github.com/edsrzf/mmap-go/LICENSE b/vendor/github.com/edsrzf/mmap-go/LICENSE
new file mode 100644
index 0000000..8f05f33
--- /dev/null
+++ b/vendor/github.com/edsrzf/mmap-go/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2011, Evan Shaw
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the copyright holder nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/edsrzf/mmap-go/README.md b/vendor/github.com/edsrzf/mmap-go/README.md
new file mode 100644
index 0000000..4cc2bfe
--- /dev/null
+++ b/vendor/github.com/edsrzf/mmap-go/README.md
@@ -0,0 +1,12 @@
+mmap-go
+=======
+
+mmap-go is a portable mmap package for the [Go programming language](http://golang.org).
+It has been tested on Linux (386, amd64), OS X, and Windows (386). It should also
+work on other Unix-like platforms, but hasn't been tested with them. I'm interested
+to hear about the results.
+
+I haven't been able to add more features without adding significant complexity,
+so mmap-go doesn't support mprotect, mincore, and maybe a few other things.
+If you're running on a Unix-like platform and need some of these features,
+I suggest Gustavo Niemeyer's [gommap](http://labix.org/gommap).
diff --git a/vendor/github.com/edsrzf/mmap-go/mmap.go b/vendor/github.com/edsrzf/mmap-go/mmap.go
new file mode 100644
index 0000000..29655bd
--- /dev/null
+++ b/vendor/github.com/edsrzf/mmap-go/mmap.go
@@ -0,0 +1,117 @@
+// Copyright 2011 Evan Shaw. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file defines the common package interface and contains a little bit of
+// factored out logic.
+
+// Package mmap allows mapping files into memory. It tries to provide a simple, reasonably portable interface,
+// but doesn't go out of its way to abstract away every little platform detail.
+// This specifically means:
+// * forked processes may or may not inherit mappings
+// * a file's timestamp may or may not be updated by writes through mappings
+// * specifying a size larger than the file's actual size can increase the file's size
+// * If the mapped file is being modified by another process while your program's running, don't expect consistent results between platforms
+package mmap
+
+import (
+ "errors"
+ "os"
+ "reflect"
+ "unsafe"
+)
+
+const (
+ // RDONLY maps the memory read-only.
+ // Attempts to write to the MMap object will result in undefined behavior.
+ RDONLY = 0
+ // RDWR maps the memory as read-write. Writes to the MMap object will update the
+ // underlying file.
+ RDWR = 1 << iota
+ // COPY maps the memory as copy-on-write. Writes to the MMap object will affect
+ // memory, but the underlying file will remain unchanged.
+ COPY
+ // If EXEC is set, the mapped memory is marked as executable.
+ EXEC
+)
+
+const (
+ // If the ANON flag is set, the mapped memory will not be backed by a file.
+ ANON = 1 << iota
+)
+
+// MMap represents a file mapped into memory.
+type MMap []byte
+
+// Map maps an entire file into memory.
+// If ANON is set in flags, f is ignored.
+func Map(f *os.File, prot, flags int) (MMap, error) {
+ return MapRegion(f, -1, prot, flags, 0)
+}
+
+// MapRegion maps part of a file into memory.
+// The offset parameter must be a multiple of the system's page size.
+// If length < 0, the entire file will be mapped.
+// If ANON is set in flags, f is ignored.
+func MapRegion(f *os.File, length int, prot, flags int, offset int64) (MMap, error) {
+ if offset%int64(os.Getpagesize()) != 0 {
+ return nil, errors.New("offset parameter must be a multiple of the system's page size")
+ }
+
+ var fd uintptr
+ if flags&ANON == 0 {
+ fd = uintptr(f.Fd())
+ if length < 0 {
+ fi, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ length = int(fi.Size())
+ }
+ } else {
+ if length <= 0 {
+ return nil, errors.New("anonymous mapping requires non-zero length")
+ }
+ fd = ^uintptr(0)
+ }
+ return mmap(length, uintptr(prot), uintptr(flags), fd, offset)
+}
+
+func (m *MMap) header() *reflect.SliceHeader {
+ return (*reflect.SliceHeader)(unsafe.Pointer(m))
+}
+
+func (m *MMap) addrLen() (uintptr, uintptr) {
+ header := m.header()
+ return header.Data, uintptr(header.Len)
+}
+
+// Lock keeps the mapped region in physical memory, ensuring that it will not be
+// swapped out.
+func (m MMap) Lock() error {
+ return m.lock()
+}
+
+// Unlock reverses the effect of Lock, allowing the mapped region to potentially
+// be swapped out.
+// If m is already unlocked, an error will result.
+func (m MMap) Unlock() error {
+ return m.unlock()
+}
+
+// Flush synchronizes the mapping's contents to the file's contents on disk.
+func (m MMap) Flush() error {
+ return m.flush()
+}
+
+// Unmap deletes the memory mapped region, flushes any remaining changes, and sets
+// m to nil.
+// Trying to read or write any remaining references to m after Unmap is called will
+// result in undefined behavior.
+// Unmap should only be called on the slice value that was originally returned from
+// a call to Map. Calling Unmap on a derived slice may cause errors.
+func (m *MMap) Unmap() error {
+ err := m.unmap()
+ *m = nil
+ return err
+}
diff --git a/vendor/github.com/edsrzf/mmap-go/mmap_unix.go b/vendor/github.com/edsrzf/mmap-go/mmap_unix.go
new file mode 100644
index 0000000..25b13e5
--- /dev/null
+++ b/vendor/github.com/edsrzf/mmap-go/mmap_unix.go
@@ -0,0 +1,51 @@
+// Copyright 2011 Evan Shaw. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux openbsd solaris netbsd
+
+package mmap
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+func mmap(len int, inprot, inflags, fd uintptr, off int64) ([]byte, error) {
+ flags := unix.MAP_SHARED
+ prot := unix.PROT_READ
+ switch {
+	case inprot&COPY != 0:
+ prot |= unix.PROT_WRITE
+ flags = unix.MAP_PRIVATE
+ case inprot&RDWR != 0:
+ prot |= unix.PROT_WRITE
+ }
+ if inprot&EXEC != 0 {
+ prot |= unix.PROT_EXEC
+ }
+ if inflags&ANON != 0 {
+ flags |= unix.MAP_ANON
+ }
+
+ b, err := unix.Mmap(int(fd), off, len, prot, flags)
+ if err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+func (m MMap) flush() error {
+ return unix.Msync([]byte(m), unix.MS_SYNC)
+}
+
+func (m MMap) lock() error {
+ return unix.Mlock([]byte(m))
+}
+
+func (m MMap) unlock() error {
+ return unix.Munlock([]byte(m))
+}
+
+func (m MMap) unmap() error {
+ return unix.Munmap([]byte(m))
+}
diff --git a/vendor/github.com/edsrzf/mmap-go/mmap_windows.go b/vendor/github.com/edsrzf/mmap-go/mmap_windows.go
new file mode 100644
index 0000000..7910da2
--- /dev/null
+++ b/vendor/github.com/edsrzf/mmap-go/mmap_windows.go
@@ -0,0 +1,143 @@
+// Copyright 2011 Evan Shaw. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mmap
+
+import (
+ "errors"
+ "os"
+ "sync"
+
+ "golang.org/x/sys/windows"
+)
+
+// mmap on Windows is a two-step process.
+// First, we call CreateFileMapping to get a handle.
+// Then, we call MapViewOfFile to get an actual pointer into memory.
+// Because we want to emulate a POSIX-style mmap, we don't want to expose
+// the handle -- only the pointer. We also want to return only a byte slice,
+// not a struct, so it's convenient to manipulate.
+
+// We keep this map so that we can get back the original handle from the memory address.
+
+type addrinfo struct {
+ file windows.Handle
+ mapview windows.Handle
+}
+
+var handleLock sync.Mutex
+var handleMap = map[uintptr]*addrinfo{}
+
+func mmap(len int, prot, flags, hfile uintptr, off int64) ([]byte, error) {
+ flProtect := uint32(windows.PAGE_READONLY)
+ dwDesiredAccess := uint32(windows.FILE_MAP_READ)
+ switch {
+	case prot&COPY != 0:
+ flProtect = windows.PAGE_WRITECOPY
+ dwDesiredAccess = windows.FILE_MAP_COPY
+ case prot&RDWR != 0:
+ flProtect = windows.PAGE_READWRITE
+ dwDesiredAccess = windows.FILE_MAP_WRITE
+ }
+ if prot&EXEC != 0 {
+ flProtect <<= 4
+ dwDesiredAccess |= windows.FILE_MAP_EXECUTE
+ }
+
+ // The maximum size is the area of the file, starting from 0,
+ // that we wish to allow to be mappable. It is the sum of
+ // the length the user requested, plus the offset where that length
+ // is starting from. This does not map the data into memory.
+ maxSizeHigh := uint32((off + int64(len)) >> 32)
+ maxSizeLow := uint32((off + int64(len)) & 0xFFFFFFFF)
+ // TODO: Do we need to set some security attributes? It might help portability.
+ h, errno := windows.CreateFileMapping(windows.Handle(hfile), nil, flProtect, maxSizeHigh, maxSizeLow, nil)
+ if h == 0 {
+ return nil, os.NewSyscallError("CreateFileMapping", errno)
+ }
+
+ // Actually map a view of the data into memory. The view's size
+ // is the length the user requested.
+ fileOffsetHigh := uint32(off >> 32)
+ fileOffsetLow := uint32(off & 0xFFFFFFFF)
+ addr, errno := windows.MapViewOfFile(h, dwDesiredAccess, fileOffsetHigh, fileOffsetLow, uintptr(len))
+ if addr == 0 {
+ return nil, os.NewSyscallError("MapViewOfFile", errno)
+ }
+ handleLock.Lock()
+ handleMap[addr] = &addrinfo{
+ file: windows.Handle(hfile),
+ mapview: h,
+ }
+ handleLock.Unlock()
+
+ m := MMap{}
+ dh := m.header()
+ dh.Data = addr
+ dh.Len = len
+ dh.Cap = dh.Len
+
+ return m, nil
+}
+
+func (m MMap) flush() error {
+ addr, len := m.addrLen()
+ errno := windows.FlushViewOfFile(addr, len)
+ if errno != nil {
+ return os.NewSyscallError("FlushViewOfFile", errno)
+ }
+
+ handleLock.Lock()
+ defer handleLock.Unlock()
+ handle, ok := handleMap[addr]
+ if !ok {
+ // should be impossible; we would've errored above
+ return errors.New("unknown base address")
+ }
+
+ errno = windows.FlushFileBuffers(handle.file)
+ return os.NewSyscallError("FlushFileBuffers", errno)
+}
+
+func (m MMap) lock() error {
+ addr, len := m.addrLen()
+ errno := windows.VirtualLock(addr, len)
+ return os.NewSyscallError("VirtualLock", errno)
+}
+
+func (m MMap) unlock() error {
+ addr, len := m.addrLen()
+ errno := windows.VirtualUnlock(addr, len)
+ return os.NewSyscallError("VirtualUnlock", errno)
+}
+
+func (m MMap) unmap() error {
+ err := m.flush()
+ if err != nil {
+ return err
+ }
+
+ addr := m.header().Data
+ // Lock the UnmapViewOfFile along with the handleMap deletion.
+ // As soon as we unmap the view, the OS is free to give the
+ // same addr to another new map. We don't want another goroutine
+ // to insert and remove the same addr into handleMap while
+ // we're trying to remove our old addr/handle pair.
+ handleLock.Lock()
+ defer handleLock.Unlock()
+ err = windows.UnmapViewOfFile(addr)
+ if err != nil {
+ return err
+ }
+
+ handle, ok := handleMap[addr]
+ if !ok {
+ // should be impossible; we would've errored above
+ return errors.New("unknown base address")
+ }
+ delete(handleMap, addr)
+
+ e := windows.CloseHandle(windows.Handle(handle.mapview))
+ return os.NewSyscallError("CloseHandle", e)
+}
diff --git a/vendor/github.com/elazarl/go-bindata-assetfs/LICENSE b/vendor/github.com/elazarl/go-bindata-assetfs/LICENSE
index 66202e1..5782c72 100644
--- a/vendor/github.com/elazarl/go-bindata-assetfs/LICENSE
+++ b/vendor/github.com/elazarl/go-bindata-assetfs/LICENSE
@@ -1,23 +1,23 @@
-Copyright (c) 2014, Elazar Leibovich
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+Copyright (c) 2014, Elazar Leibovich
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/elazarl/go-bindata-assetfs/README.md b/vendor/github.com/elazarl/go-bindata-assetfs/README.md
index e12da21..27ee48f 100644
--- a/vendor/github.com/elazarl/go-bindata-assetfs/README.md
+++ b/vendor/github.com/elazarl/go-bindata-assetfs/README.md
@@ -1,46 +1,46 @@
-# go-bindata-assetfs
-
-Serve embedded files from [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) with `net/http`.
-
-[GoDoc](http://godoc.org/github.com/elazarl/go-bindata-assetfs)
-
-### Installation
-
-Install with
-
- $ go get github.com/jteeuwen/go-bindata/...
- $ go get github.com/elazarl/go-bindata-assetfs/...
-
-### Creating embedded data
-
-Usage is identical to [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) usage,
-instead of running `go-bindata` run `go-bindata-assetfs`.
-
-The tool will create a `bindata_assetfs.go` file, which contains the embedded data.
-
-A typical use case is
-
- $ go-bindata-assetfs data/...
-
-### Using assetFS in your code
-
-The generated file provides an `assetFS()` function that returns a `http.Filesystem`
-wrapping the embedded files. What you usually want to do is:
-
- http.Handle("/", http.FileServer(assetFS()))
-
-This would run an HTTP server serving the embedded files.
-
-## Without running binary tool
-
-You can always just run the `go-bindata` tool, and then
-
-use
-
- import "github.com/elazarl/go-bindata-assetfs"
- ...
- http.Handle("/",
- http.FileServer(
- &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, AssetInfo: AssetInfo, Prefix: "data"}))
-
-to serve files embedded from the `data` directory.
+# go-bindata-assetfs
+
+Serve embedded files from [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) with `net/http`.
+
+[GoDoc](http://godoc.org/github.com/elazarl/go-bindata-assetfs)
+
+### Installation
+
+Install with
+
+ $ go get github.com/jteeuwen/go-bindata/...
+ $ go get github.com/elazarl/go-bindata-assetfs/...
+
+### Creating embedded data
+
+Usage is identical to [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) usage,
+instead of running `go-bindata` run `go-bindata-assetfs`.
+
+The tool will create a `bindata_assetfs.go` file, which contains the embedded data.
+
+A typical use case is
+
+ $ go-bindata-assetfs data/...
+
+### Using assetFS in your code
+
+The generated file provides an `assetFS()` function that returns a `http.Filesystem`
+wrapping the embedded files. What you usually want to do is:
+
+ http.Handle("/", http.FileServer(assetFS()))
+
+This would run an HTTP server serving the embedded files.
+
+## Without running binary tool
+
+You can always just run the `go-bindata` tool, and then
+
+use
+
+ import "github.com/elazarl/go-bindata-assetfs"
+ ...
+ http.Handle("/",
+ http.FileServer(
+ &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, AssetInfo: AssetInfo, Prefix: "data"}))
+
+to serve files embedded from the `data` directory.
diff --git a/vendor/github.com/elazarl/go-bindata-assetfs/assetfs.go b/vendor/github.com/elazarl/go-bindata-assetfs/assetfs.go
index 9397e58..04f6d7a 100644
--- a/vendor/github.com/elazarl/go-bindata-assetfs/assetfs.go
+++ b/vendor/github.com/elazarl/go-bindata-assetfs/assetfs.go
@@ -9,6 +9,7 @@ import (
"os"
"path"
"path/filepath"
+ "strings"
"time"
)
@@ -145,14 +146,22 @@ func (fs *AssetFS) Open(name string) (http.File, error) {
}
if b, err := fs.Asset(name); err == nil {
timestamp := defaultFileTimestamp
- if info, err := fs.AssetInfo(name); err == nil {
- timestamp = info.ModTime()
+ if fs.AssetInfo != nil {
+ if info, err := fs.AssetInfo(name); err == nil {
+ timestamp = info.ModTime()
+ }
}
return NewAssetFile(name, b, timestamp), nil
}
if children, err := fs.AssetDir(name); err == nil {
return NewAssetDirectory(name, children, fs), nil
} else {
+ // If the error is not found, return an error that will
+ // result in a 404 error. Otherwise the server returns
+ // a 500 error for files not found.
+ if strings.Contains(err.Error(), "not found") {
+ return nil, os.ErrNotExist
+ }
return nil, err
}
}
diff --git a/vendor/github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs/main.go b/vendor/github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs/main.go
deleted file mode 100644
index fdaad5e..0000000
--- a/vendor/github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs/main.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package main
-
-import (
- "bufio"
- "bytes"
- "flag"
- "fmt"
- "os"
- "os/exec"
- "strings"
-)
-
-const bindatafile = "bindata.go"
-
-func isDebug(args []string) bool {
- flagset := flag.NewFlagSet("", flag.ContinueOnError)
- debug := flagset.Bool("debug", false, "")
- debugArgs := make([]string, 0)
- for _, arg := range args {
- if strings.HasPrefix(arg, "-debug") {
- debugArgs = append(debugArgs, arg)
- }
- }
- flagset.Parse(debugArgs)
- if debug == nil {
- return false
- }
- return *debug
-}
-
-func main() {
- if _, err := exec.LookPath("go-bindata"); err != nil {
- fmt.Println("Cannot find go-bindata executable in path")
- fmt.Println("Maybe you need: go get github.com/elazarl/go-bindata-assetfs/...")
- os.Exit(1)
- }
- cmd := exec.Command("go-bindata", os.Args[1:]...)
- cmd.Stdin = os.Stdin
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- if err := cmd.Run(); err != nil {
- os.Exit(1)
- }
- in, err := os.Open(bindatafile)
- if err != nil {
- fmt.Fprintln(os.Stderr, "Cannot read", bindatafile, err)
- return
- }
- out, err := os.Create("bindata_assetfs.go")
- if err != nil {
- fmt.Fprintln(os.Stderr, "Cannot write 'bindata_assetfs.go'", err)
- return
- }
- debug := isDebug(os.Args[1:])
- r := bufio.NewReader(in)
- done := false
- for line, isPrefix, err := r.ReadLine(); err == nil; line, isPrefix, err = r.ReadLine() {
- if !isPrefix {
- line = append(line, '\n')
- }
- if _, err := out.Write(line); err != nil {
- fmt.Fprintln(os.Stderr, "Cannot write to 'bindata_assetfs.go'", err)
- return
- }
- if !done && !isPrefix && bytes.HasPrefix(line, []byte("import (")) {
- if debug {
- fmt.Fprintln(out, "\t\"net/http\"")
- } else {
- fmt.Fprintln(out, "\t\"github.com/elazarl/go-bindata-assetfs\"")
- }
- done = true
- }
- }
- if debug {
- fmt.Fprintln(out, `
-func assetFS() http.FileSystem {
- for k := range _bintree.Children {
- return http.Dir(k)
- }
- panic("unreachable")
-}`)
- } else {
- fmt.Fprintln(out, `
-func assetFS() *assetfs.AssetFS {
- assetInfo := func(path string) (os.FileInfo, error) {
- return os.Stat(path)
- }
- for k := range _bintree.Children {
- return &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, AssetInfo: assetInfo, Prefix: k}
- }
- panic("unreachable")
-}`)
- }
- // Close files BEFORE remove calls (don't use defer).
- in.Close()
- out.Close()
- if err := os.Remove(bindatafile); err != nil {
- fmt.Fprintln(os.Stderr, "Cannot remove", bindatafile, err)
- }
-}
diff --git a/vendor/github.com/facebookgo/ensure/.travis.yml b/vendor/github.com/facebookgo/ensure/.travis.yml
new file mode 100644
index 0000000..9c9f036
--- /dev/null
+++ b/vendor/github.com/facebookgo/ensure/.travis.yml
@@ -0,0 +1,20 @@
+language: go
+
+go:
+ - 1.5
+
+before_install:
+ - go get -v golang.org/x/tools/cmd/vet
+ - go get -v golang.org/x/tools/cmd/cover
+ - go get -v github.com/golang/lint/golint
+
+install:
+ - go install -race -v std
+ - go get -race -t -v ./...
+ - go install -race -v ./...
+
+script:
+ - go vet ./...
+ - $HOME/gopath/bin/golint .
+ - go test -cpu=2 -race -v ./...
+ - go test -cpu=2 -covermode=atomic ./...
diff --git a/vendor/github.com/facebookgo/ensure/ensure.go b/vendor/github.com/facebookgo/ensure/ensure.go
new file mode 100644
index 0000000..44fab27
--- /dev/null
+++ b/vendor/github.com/facebookgo/ensure/ensure.go
@@ -0,0 +1,353 @@
+// Package ensure provides utilities for testing to ensure the
+// given conditions are met and Fatal if they aren't satisfied.
+//
+// The various functions here show a useful error message automatically
+// including identifying source location. They additionally support arbitrary
+// arguments which will be printed using the spew library.
+package ensure
+
+import (
+ "bytes"
+ "fmt"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "strings"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/facebookgo/stack"
+ subsetp "github.com/facebookgo/subset"
+)
+
+// Fataler defines the minimal interface necessary to trigger a Fatal when a
+// condition is hit. testing.T & testing.B satisfy this for example.
+type Fataler interface {
+ Fatal(a ...interface{})
+}
+
+// cond represents a condition that wasn't satisfied, and is useful to generate
+// log messages.
+type cond struct {
+ Fataler Fataler
+ Skip int
+ Format string
+ FormatArgs []interface{}
+ Extra []interface{}
+ DisableDeleteSelf bool
+}
+
+// This deletes "ensure.go:xx" removing a confusing piece of information since
+// it will be an internal reference.
+var deleteSelf = strings.Repeat("\b", 15)
+
+func (c cond) String() string {
+ var b bytes.Buffer
+ if c.DisableDeleteSelf {
+ fmt.Fprint(&b, "\n")
+ } else {
+ fmt.Fprint(&b, deleteSelf)
+ }
+ fmt.Fprint(&b, pstack(stack.Callers(c.Skip+1), c.DisableDeleteSelf))
+ if c.Format != "" {
+ fmt.Fprintf(&b, c.Format, c.FormatArgs...)
+ }
+ if len(c.Extra) != 0 {
+ fmt.Fprint(&b, "\n")
+ fmt.Fprint(&b, tsdump(c.Extra...))
+ }
+ return b.String()
+}
+
+// fatal triggers the fatal and logs the cond's message. It adds 2 to Skip, to
+// skip itself as well as the caller.
+func fatal(c cond) {
+ c.Skip = c.Skip + 2
+ c.Fataler.Fatal(c.String())
+}
+
+// Err ensures the error satisfies the given regular expression.
+func Err(t Fataler, err error, re *regexp.Regexp, a ...interface{}) {
+ if err == nil && re == nil {
+ return
+ }
+
+ if err == nil && re != nil {
+ fatal(cond{
+ Fataler: t,
+ Format: `expected error: "%s" but got a nil error`,
+ FormatArgs: []interface{}{re},
+ Extra: a,
+ })
+ return
+ }
+
+ if err != nil && re == nil {
+ fatal(cond{
+ Fataler: t,
+ Format: `unexpected error: %s`,
+ FormatArgs: []interface{}{err},
+ Extra: a,
+ })
+ return
+ }
+
+ if !re.MatchString(err.Error()) {
+ fatal(cond{
+ Fataler: t,
+ Format: `expected error: "%s" but got "%s"`,
+ FormatArgs: []interface{}{re, err},
+ Extra: a,
+ })
+ }
+}
+
+// DeepEqual ensures actual and expected are equal. It does so using
+// reflect.DeepEqual.
+func DeepEqual(t Fataler, actual, expected interface{}, a ...interface{}) {
+ if !reflect.DeepEqual(actual, expected) {
+ fatal(cond{
+ Fataler: t,
+ Format: "expected these to be equal:\nACTUAL:\n%s\nEXPECTED:\n%s",
+ FormatArgs: []interface{}{spew.Sdump(actual), tsdump(expected)},
+ Extra: a,
+ })
+ }
+}
+
+// NotDeepEqual ensures actual and expected are not equal. It does so using
+// reflect.DeepEqual.
+func NotDeepEqual(t Fataler, actual, expected interface{}, a ...interface{}) {
+ if reflect.DeepEqual(actual, expected) {
+ fatal(cond{
+ Fataler: t,
+ Format: "expected two different values, but got the same:\n%s",
+ FormatArgs: []interface{}{tsdump(actual)},
+ Extra: a,
+ })
+ }
+}
+
+// Subset ensures actual matches subset.
+func Subset(t Fataler, actual, subset interface{}, a ...interface{}) {
+ if !subsetp.Check(subset, actual) {
+ fatal(cond{
+ Fataler: t,
+ Format: "expected subset not found:\nACTUAL:\n%s\nEXPECTED SUBSET\n%s",
+ FormatArgs: []interface{}{spew.Sdump(actual), tsdump(subset)},
+ Extra: a,
+ })
+ }
+}
+
+// DisorderedSubset attempts to find all the given subsets in the list of actuals.
+// Does not allow one actual to match more than one subset, be wary of the
+// possibility of insufficiently specific subsets.
+func DisorderedSubset(t Fataler, a, s interface{}, extra ...interface{}) {
+ actuals := toInterfaceSlice(a)
+ subsets := toInterfaceSlice(s)
+
+ used := make([]bool, len(actuals))
+ matches := 0
+ for _, subset := range subsets {
+ for i, actual := range actuals {
+ if used[i] {
+ continue
+ }
+ if subsetp.Check(subset, actual) {
+ matches++
+ used[i] = true
+ break
+ }
+ }
+ }
+ if matches != len(subsets) {
+ fatal(cond{
+ Fataler: t,
+ Format: "expected subsets not found:\nACTUAL:\n%s\nEXPECTED SUBSET\n%s",
+ FormatArgs: []interface{}{spew.Sdump(actuals), tsdump(subsets)},
+ Extra: extra,
+ })
+ }
+}
+
+// Nil ensures v is nil.
+func Nil(t Fataler, v interface{}, a ...interface{}) {
+ vs := tsdump(v)
+ sp := " "
+ if strings.Contains(vs[:len(vs)-1], "\n") {
+ sp = "\n"
+ }
+
+ if v != nil {
+ // Special case errors for prettier output.
+ if _, ok := v.(error); ok {
+ fatal(cond{
+ Fataler: t,
+ Format: `unexpected error: %s`,
+ FormatArgs: []interface{}{v},
+ Extra: a,
+ })
+ } else {
+ fatal(cond{
+ Fataler: t,
+ Format: "expected nil value but got:%s%s",
+ FormatArgs: []interface{}{sp, vs},
+ Extra: a,
+ })
+ }
+ }
+}
+
+// NotNil ensures v is not nil.
+func NotNil(t Fataler, v interface{}, a ...interface{}) {
+ if v == nil {
+ fatal(cond{
+ Fataler: t,
+ Format: "expected a value but got nil",
+ Extra: a,
+ })
+ }
+}
+
+// True ensures v is true.
+func True(t Fataler, v bool, a ...interface{}) {
+ if !v {
+ fatal(cond{
+ Fataler: t,
+ Format: "expected true but got false",
+ Extra: a,
+ })
+ }
+}
+
+// False ensures v is false.
+func False(t Fataler, v bool, a ...interface{}) {
+ if v {
+ fatal(cond{
+ Fataler: t,
+ Format: "expected false but got true",
+ Extra: a,
+ })
+ }
+}
+
+// StringContains ensures string s contains the string substr.
+func StringContains(t Fataler, s, substr string, a ...interface{}) {
+ if !strings.Contains(s, substr) {
+ format := `expected substring "%s" was not found in "%s"`
+
+ // use multi line output if either string contains newlines
+ if strings.Contains(s, "\n") || strings.Contains(substr, "\n") {
+ format = "expected substring was not found:\nEXPECTED SUBSTRING:\n%s\nACTUAL:\n%s"
+ }
+
+ fatal(cond{
+ Fataler: t,
+ Format: format,
+ FormatArgs: []interface{}{substr, s},
+ Extra: a,
+ })
+ }
+}
+
+// StringDoesNotContain ensures string s does not contain the string substr.
+func StringDoesNotContain(t Fataler, s, substr string, a ...interface{}) {
+ if strings.Contains(s, substr) {
+ fatal(cond{
+ Fataler: t,
+ Format: `substring "%s" was not supposed to be found in "%s"`,
+ FormatArgs: []interface{}{substr, s},
+ Extra: a,
+ })
+ }
+}
+
+// SameElements ensures the two given slices contain the same elements,
+// ignoring the order. It uses DeepEqual for element comparison.
+func SameElements(t Fataler, actual, expected interface{}, extra ...interface{}) {
+ actualSlice := toInterfaceSlice(actual)
+ expectedSlice := toInterfaceSlice(expected)
+ if len(actualSlice) != len(expectedSlice) {
+ fatal(cond{
+ Fataler: t,
+ Format: "expected same elements but found slices of different lengths:\nACTUAL:\n%s\nEXPECTED\n%s",
+ FormatArgs: []interface{}{tsdump(actual), tsdump(expected)},
+ Extra: extra,
+ })
+ }
+
+ used := map[int]bool{}
+outer:
+ for _, a := range expectedSlice {
+ for i, b := range actualSlice {
+ if !used[i] && reflect.DeepEqual(a, b) {
+ used[i] = true
+ continue outer
+ }
+ }
+ fatal(cond{
+ Fataler: t,
+ Format: "missing expected element:\nACTUAL:\n%s\nEXPECTED:\n%s\nMISSING ELEMENT\n%s",
+ FormatArgs: []interface{}{tsdump(actual), tsdump(expected), tsdump(a)},
+ Extra: extra,
+ })
+ }
+}
+
+// PanicDeepEqual ensures a panic occurs and the recovered value is DeepEqual
+// to the expected value.
+func PanicDeepEqual(t Fataler, expected interface{}, a ...interface{}) {
+ if expected == nil {
+ panic("can't pass nil to ensure.PanicDeepEqual")
+ }
+ actual := recover()
+ if !reflect.DeepEqual(actual, expected) {
+ fatal(cond{
+ Fataler: t,
+ Format: "expected these to be equal:\nACTUAL:\n%s\nEXPECTED:\n%s",
+ FormatArgs: []interface{}{spew.Sdump(actual), tsdump(expected)},
+ Extra: a,
+ DisableDeleteSelf: true,
+ })
+ }
+}
+
+// makes any slice into an []interface{}
+func toInterfaceSlice(v interface{}) []interface{} {
+ rv := reflect.ValueOf(v)
+ l := rv.Len()
+ ret := make([]interface{}, l)
+ for i := 0; i < l; i++ {
+ ret[i] = rv.Index(i).Interface()
+ }
+ return ret
+}
+
+// tsdump is Sdump without the trailing newline.
+func tsdump(a ...interface{}) string {
+ return strings.TrimSpace(spew.Sdump(a...))
+}
+
+// pstack is the stack up to the Test function frame.
+func pstack(s stack.Stack, skipPrefix bool) string {
+ first := s[0]
+ if isTestFrame(first) {
+ return fmt.Sprintf("%s:%d: ", filepath.Base(first.File), first.Line)
+ }
+ prefix := " "
+ if skipPrefix {
+ prefix = ""
+ }
+ var snew stack.Stack
+ for _, f := range s {
+ snew = append(snew, f)
+ if isTestFrame(f) {
+ return prefix + snew.String() + "\n"
+ }
+ }
+ return prefix + s.String() + "\n"
+}
+
+func isTestFrame(f stack.Frame) bool {
+ return strings.HasPrefix(f.Name, "Test")
+}
diff --git a/vendor/github.com/facebookgo/ensure/license b/vendor/github.com/facebookgo/ensure/license
new file mode 100644
index 0000000..356f2fd
--- /dev/null
+++ b/vendor/github.com/facebookgo/ensure/license
@@ -0,0 +1,30 @@
+BSD License
+
+For ensure software
+
+Copyright (c) 2015, Facebook, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ * Neither the name Facebook nor the names of its contributors may be used to
+ endorse or promote products derived from this software without specific
+ prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/facebookgo/ensure/patents b/vendor/github.com/facebookgo/ensure/patents
new file mode 100644
index 0000000..808ef85
--- /dev/null
+++ b/vendor/github.com/facebookgo/ensure/patents
@@ -0,0 +1,33 @@
+Additional Grant of Patent Rights Version 2
+
+"Software" means the ensure software distributed by Facebook, Inc.
+
+Facebook, Inc. ("Facebook") hereby grants to each recipient of the Software
+("you") a perpetual, worldwide, royalty-free, non-exclusive, irrevocable
+(subject to the termination provision below) license under any Necessary
+Claims, to make, have made, use, sell, offer to sell, import, and otherwise
+transfer the Software. For avoidance of doubt, no license is granted under
+Facebook’s rights in any patent claims that are infringed by (i) modifications
+to the Software made by you or any third party or (ii) the Software in
+combination with any software or other technology.
+
+The license granted hereunder will terminate, automatically and without notice,
+if you (or any of your subsidiaries, corporate affiliates or agents) initiate
+directly or indirectly, or take a direct financial interest in, any Patent
+Assertion: (i) against Facebook or any of its subsidiaries or corporate
+affiliates, (ii) against any party if such Patent Assertion arises in whole or
+in part from any software, technology, product or service of Facebook or any of
+its subsidiaries or corporate affiliates, or (iii) against any party relating
+to the Software. Notwithstanding the foregoing, if Facebook or any of its
+subsidiaries or corporate affiliates files a lawsuit alleging patent
+infringement against you in the first instance, and you respond by filing a
+patent infringement counterclaim in that lawsuit against that party that is
+unrelated to the Software, the license granted hereunder will not terminate
+under section (i) of this paragraph due to such counterclaim.
+
+A "Necessary Claim" is a claim of a patent owned by Facebook that is
+necessarily infringed by the Software standing alone.
+
+A "Patent Assertion" is any lawsuit or other action alleging direct, indirect,
+or contributory infringement or inducement to infringe any patent, including a
+cross-claim or counterclaim.
diff --git a/vendor/github.com/facebookgo/ensure/readme.md b/vendor/github.com/facebookgo/ensure/readme.md
new file mode 100644
index 0000000..9c3ba37
--- /dev/null
+++ b/vendor/github.com/facebookgo/ensure/readme.md
@@ -0,0 +1,4 @@
+ensure [![Build Status](https://secure.travis-ci.org/facebookgo/ensure.png)](http://travis-ci.org/facebookgo/ensure)
+======
+
+Documentation: https://godoc.org/github.com/facebookgo/ensure
diff --git a/vendor/github.com/facebookgo/freeport/.travis.yml b/vendor/github.com/facebookgo/freeport/.travis.yml
new file mode 100644
index 0000000..2cc62c5
--- /dev/null
+++ b/vendor/github.com/facebookgo/freeport/.travis.yml
@@ -0,0 +1,24 @@
+language: go
+
+go:
+ - 1.2
+ - 1.3
+
+matrix:
+ fast_finish: true
+
+before_install:
+ - go get -v code.google.com/p/go.tools/cmd/vet
+ - go get -v github.com/golang/lint/golint
+ - go get -v code.google.com/p/go.tools/cmd/cover
+
+install:
+ - go install -race -v std
+ - go get -race -t -v ./...
+ - go install -race -v ./...
+
+script:
+ - go vet ./...
+ - $HOME/gopath/bin/golint .
+ - go test -cpu=2 -race -v ./...
+ - go test -cpu=2 -covermode=atomic ./...
diff --git a/vendor/github.com/facebookgo/freeport/freeport.go b/vendor/github.com/facebookgo/freeport/freeport.go
new file mode 100644
index 0000000..14e1009
--- /dev/null
+++ b/vendor/github.com/facebookgo/freeport/freeport.go
@@ -0,0 +1,24 @@
+// Package freeport provides an API to find a free port to bind to.
+package freeport
+
+import (
+ "net"
+ "strconv"
+)
+
+// Get a free port.
+func Get() (port int, err error) {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return 0, err
+ }
+ defer listener.Close()
+
+ addr := listener.Addr().String()
+ _, portString, err := net.SplitHostPort(addr)
+ if err != nil {
+ return 0, err
+ }
+
+ return strconv.Atoi(portString)
+}
diff --git a/vendor/github.com/facebookgo/freeport/license b/vendor/github.com/facebookgo/freeport/license
new file mode 100644
index 0000000..e440440
--- /dev/null
+++ b/vendor/github.com/facebookgo/freeport/license
@@ -0,0 +1,30 @@
+BSD License
+
+For freeport software
+
+Copyright (c) 2015, Facebook, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ * Neither the name Facebook nor the names of its contributors may be used to
+ endorse or promote products derived from this software without specific
+ prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/facebookgo/freeport/patents b/vendor/github.com/facebookgo/freeport/patents
new file mode 100644
index 0000000..3947444
--- /dev/null
+++ b/vendor/github.com/facebookgo/freeport/patents
@@ -0,0 +1,33 @@
+Additional Grant of Patent Rights Version 2
+
+"Software" means the freeport software distributed by Facebook, Inc.
+
+Facebook, Inc. ("Facebook") hereby grants to each recipient of the Software
+("you") a perpetual, worldwide, royalty-free, non-exclusive, irrevocable
+(subject to the termination provision below) license under any Necessary
+Claims, to make, have made, use, sell, offer to sell, import, and otherwise
+transfer the Software. For avoidance of doubt, no license is granted under
+Facebook’s rights in any patent claims that are infringed by (i) modifications
+to the Software made by you or any third party or (ii) the Software in
+combination with any software or other technology.
+
+The license granted hereunder will terminate, automatically and without notice,
+if you (or any of your subsidiaries, corporate affiliates or agents) initiate
+directly or indirectly, or take a direct financial interest in, any Patent
+Assertion: (i) against Facebook or any of its subsidiaries or corporate
+affiliates, (ii) against any party if such Patent Assertion arises in whole or
+in part from any software, technology, product or service of Facebook or any of
+its subsidiaries or corporate affiliates, or (iii) against any party relating
+to the Software. Notwithstanding the foregoing, if Facebook or any of its
+subsidiaries or corporate affiliates files a lawsuit alleging patent
+infringement against you in the first instance, and you respond by filing a
+patent infringement counterclaim in that lawsuit against that party that is
+unrelated to the Software, the license granted hereunder will not terminate
+under section (i) of this paragraph due to such counterclaim.
+
+A "Necessary Claim" is a claim of a patent owned by Facebook that is
+necessarily infringed by the Software standing alone.
+
+A "Patent Assertion" is any lawsuit or other action alleging direct, indirect,
+or contributory infringement or inducement to infringe any patent, including a
+cross-claim or counterclaim.
diff --git a/vendor/github.com/facebookgo/freeport/readme.md b/vendor/github.com/facebookgo/freeport/readme.md
new file mode 100644
index 0000000..2545441
--- /dev/null
+++ b/vendor/github.com/facebookgo/freeport/readme.md
@@ -0,0 +1,5 @@
+freeport [![Build Status](https://secure.travis-ci.org/facebookgo/freeport.png)](http://travis-ci.org/facebookgo/freeport)
+========
+
+Find a free port. Documentation:
+http://godoc.org/github.com/facebookgo/freeport
diff --git a/vendor/github.com/facebookgo/stack/.travis.yml b/vendor/github.com/facebookgo/stack/.travis.yml
new file mode 100644
index 0000000..4938b45
--- /dev/null
+++ b/vendor/github.com/facebookgo/stack/.travis.yml
@@ -0,0 +1,23 @@
+language: go
+
+go:
+ - 1.3
+
+matrix:
+ fast_finish: true
+
+before_install:
+ - if ! go get code.google.com/p/go.tools/cmd/vet; then go get golang.org/x/tools/cmd/vet; fi
+ - go get -v github.com/golang/lint/golint
+ - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
+
+install:
+ - go install -race -v std
+ - go get -race -t -v ./...
+ - go install -race -v ./...
+
+script:
+ - go vet ./...
+ - $HOME/gopath/bin/golint .
+ - go test -cpu=2 -race -v ./...
+ - go test -cpu=2 -covermode=atomic ./...
diff --git a/vendor/github.com/facebookgo/stack/license b/vendor/github.com/facebookgo/stack/license
new file mode 100644
index 0000000..f55fc8a
--- /dev/null
+++ b/vendor/github.com/facebookgo/stack/license
@@ -0,0 +1,30 @@
+BSD License
+
+For stack software
+
+Copyright (c) 2015, Facebook, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ * Neither the name Facebook nor the names of its contributors may be used to
+ endorse or promote products derived from this software without specific
+ prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/facebookgo/stack/patents b/vendor/github.com/facebookgo/stack/patents
new file mode 100644
index 0000000..711e9e6
--- /dev/null
+++ b/vendor/github.com/facebookgo/stack/patents
@@ -0,0 +1,33 @@
+Additional Grant of Patent Rights Version 2
+
+"Software" means the stack software distributed by Facebook, Inc.
+
+Facebook, Inc. ("Facebook") hereby grants to each recipient of the Software
+("you") a perpetual, worldwide, royalty-free, non-exclusive, irrevocable
+(subject to the termination provision below) license under any Necessary
+Claims, to make, have made, use, sell, offer to sell, import, and otherwise
+transfer the Software. For avoidance of doubt, no license is granted under
+Facebook’s rights in any patent claims that are infringed by (i) modifications
+to the Software made by you or any third party or (ii) the Software in
+combination with any software or other technology.
+
+The license granted hereunder will terminate, automatically and without notice,
+if you (or any of your subsidiaries, corporate affiliates or agents) initiate
+directly or indirectly, or take a direct financial interest in, any Patent
+Assertion: (i) against Facebook or any of its subsidiaries or corporate
+affiliates, (ii) against any party if such Patent Assertion arises in whole or
+in part from any software, technology, product or service of Facebook or any of
+its subsidiaries or corporate affiliates, or (iii) against any party relating
+to the Software. Notwithstanding the foregoing, if Facebook or any of its
+subsidiaries or corporate affiliates files a lawsuit alleging patent
+infringement against you in the first instance, and you respond by filing a
+patent infringement counterclaim in that lawsuit against that party that is
+unrelated to the Software, the license granted hereunder will not terminate
+under section (i) of this paragraph due to such counterclaim.
+
+A "Necessary Claim" is a claim of a patent owned by Facebook that is
+necessarily infringed by the Software standing alone.
+
+A "Patent Assertion" is any lawsuit or other action alleging direct, indirect,
+or contributory infringement or inducement to infringe any patent, including a
+cross-claim or counterclaim.
diff --git a/vendor/github.com/facebookgo/stack/readme.md b/vendor/github.com/facebookgo/stack/readme.md
new file mode 100644
index 0000000..e6db95c
--- /dev/null
+++ b/vendor/github.com/facebookgo/stack/readme.md
@@ -0,0 +1,142 @@
+# stack
+--
+ import "github.com/facebookgo/stack"
+
+Package stack provides utilities to capture and pass around stack traces.
+
+This is useful for building errors that know where they originated from, to
+track where a certain log event occurred and so on.
+
+The package provides stack.Multi which represents a sequence of stack traces.
+Since in Go we return errors they don't necessarily end up with a single useful
+stack trace. For example an error may be going thru a channel across goroutines,
+in which case we may want to capture a stack trace in both (or many) goroutines.
+stack.Multi in turn is made up of stack.Stack, which is a set of stack.Frames.
+Each stack.Frame contains the File/Line/Name (function name). All these types
+implement a pretty human readable String() function.
+
+The GOPATH is stripped from the File location. Look at the StripGOPATH function
+on instructions for how to embed the GOPATH into the binary for when deploying to
+production and the GOPATH environment variable may not be set. The package name
+is stripped from the Name of the function since it is included in the File
+location.
+
+## Usage
+
+#### func StripGOPATH
+
+```go
+func StripGOPATH(f string) string
+```
+StripGOPATH strips the GOPATH prefix from the file path f. In development, this
+will be done using the GOPATH environment variable. For production builds, where
+the GOPATH environment will not be set, the GOPATH can be included in the binary
+by passing ldflags, for example:
+
+ GO_LDFLAGS="$GO_LDFLAGS -X github.com/facebookgo/stack.gopath $GOPATH"
+ go install "-ldflags=$GO_LDFLAGS" my/pkg
+
+#### func StripPackage
+
+```go
+func StripPackage(n string) string
+```
+StripPackage strips the package name from the given Func.Name.
+
+#### type Frame
+
+```go
+type Frame struct {
+ File string
+ Line int
+ Name string
+}
+```
+
+Frame identifies a file, line & function name in the stack.
+
+#### func Caller
+
+```go
+func Caller(skip int) Frame
+```
+Caller returns a single Frame for the caller. The argument skip is the number of
+stack frames to ascend, with 0 identifying the caller of Callers.
+
+#### func (Frame) String
+
+```go
+func (f Frame) String() string
+```
+String provides the standard file:line representation.
+
+#### type Multi
+
+```go
+type Multi struct {
+}
+```
+
+Multi represents a number of Stacks. This is useful to allow tracking a value as
+it travels thru code.
+
+#### func CallersMulti
+
+```go
+func CallersMulti(skip int) *Multi
+```
+CallersMulti returns a Multi which includes one Stack for the current callers.
+The argument skip is the number of stack frames to ascend, with 0 identifying
+the caller of CallersMulti.
+
+#### func (*Multi) Add
+
+```go
+func (m *Multi) Add(s Stack)
+```
+Add the given Stack to this Multi.
+
+#### func (*Multi) AddCallers
+
+```go
+func (m *Multi) AddCallers(skip int)
+```
+AddCallers adds the Callers Stack to this Multi. The argument skip is the number
+of stack frames to ascend, with 0 identifying the caller of Callers.
+
+#### func (*Multi) Stacks
+
+```go
+func (m *Multi) Stacks() []Stack
+```
+Stacks returns the tracked Stacks.
+
+#### func (*Multi) String
+
+```go
+func (m *Multi) String() string
+```
+String provides a human readable multi-line stack trace.
+
+#### type Stack
+
+```go
+type Stack []Frame
+```
+
+Stack represents an ordered set of Frames.
+
+#### func Callers
+
+```go
+func Callers(skip int) Stack
+```
+Callers returns a Stack of Frames for the callers. The argument skip is the
+number of stack frames to ascend, with 0 identifying the caller of Callers.
+
+#### func (Stack) String
+
+```go
+func (s Stack) String() string
+```
+String provides the standard multi-line stack trace.
diff --git a/vendor/github.com/facebookgo/stack/stack.go b/vendor/github.com/facebookgo/stack/stack.go
new file mode 100644
index 0000000..fb2a6cb
--- /dev/null
+++ b/vendor/github.com/facebookgo/stack/stack.go
@@ -0,0 +1,230 @@
+// Package stack provides utilities to capture and pass around stack traces.
+//
+// This is useful for building errors that know where they originated from, to
+// track where a certain log event occurred and so on.
+//
+// The package provides stack.Multi which represents a sequence of stack
+// traces. Since in Go we return errors they don't necessarily end up with a
+// single useful stack trace. For example an error may be going thru a channel
+// across goroutines, in which case we may want to capture a stack trace in
+// both (or many) goroutines. stack.Multi in turn is made up of stack.Stack,
+// which is a set of stack.Frames. Each stack.Frame contains the File/Line/Name
+// (function name). All these types implement a pretty human readable String()
+// function.
+//
+// The GOPATH is stripped from the File location. Look at the StripGOPATH
+// function on instructions for how to embed the GOPATH into the binary for when
+// deploying to production and the GOPATH environment variable may not be set.
+// The package name is stripped from the Name of the function since it is included
+// in the File location.
+package stack
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
+const maxStackSize = 32
+
+// Frame identifies a file, line & function name in the stack.
+type Frame struct {
+ File string
+ Line int
+ Name string
+}
+
+// String provides the standard file:line representation.
+func (f Frame) String() string {
+ return fmt.Sprintf("%s:%d %s", f.File, f.Line, f.Name)
+}
+
+// Stack represents an ordered set of Frames.
+type Stack []Frame
+
+// String provides the standard multi-line stack trace.
+func (s Stack) String() string {
+ var b bytes.Buffer
+ writeStack(&b, s)
+ return b.String()
+}
+
+// Multi represents a number of Stacks. This is useful to allow tracking a
+// value as it travels thru code.
+type Multi struct {
+ stacks []Stack
+}
+
+// Stacks returns the tracked Stacks.
+func (m *Multi) Stacks() []Stack {
+ return m.stacks
+}
+
+// Add the given Stack to this Multi.
+func (m *Multi) Add(s Stack) {
+ m.stacks = append(m.stacks, s)
+}
+
+// AddCallers adds the Callers Stack to this Multi. The argument skip is
+// the number of stack frames to ascend, with 0 identifying the caller of
+// Callers.
+func (m *Multi) AddCallers(skip int) {
+ m.Add(Callers(skip + 1))
+}
+
+// String provides a human readable multi-line stack trace.
+func (m *Multi) String() string {
+ var b bytes.Buffer
+ for i, s := range m.stacks {
+ if i != 0 {
+ fmt.Fprintf(&b, "\n(Stack %d)\n", i+1)
+ }
+ writeStack(&b, s)
+ }
+ return b.String()
+}
+
+// Copy makes a copy of the stack which is safe to modify.
+func (m *Multi) Copy() *Multi {
+ m2 := &Multi{
+ stacks: make([]Stack, len(m.stacks)),
+ }
+ copy(m2.stacks, m.stacks)
+ return m2
+}
+
+// Caller returns a single Frame for the caller. The argument skip is the
+// number of stack frames to ascend, with 0 identifying the caller of Callers.
+func Caller(skip int) Frame {
+ pc, file, line, _ := runtime.Caller(skip + 1)
+ fun := runtime.FuncForPC(pc)
+ return Frame{
+ File: StripGOPATH(file),
+ Line: line,
+ Name: StripPackage(fun.Name()),
+ }
+}
+
+// Callers returns a Stack of Frames for the callers. The argument skip is the
+// number of stack frames to ascend, with 0 identifying the caller of Callers.
+func Callers(skip int) Stack {
+ pcs := make([]uintptr, maxStackSize)
+ num := runtime.Callers(skip+2, pcs)
+ stack := make(Stack, num)
+ for i, pc := range pcs[:num] {
+ fun := runtime.FuncForPC(pc)
+ file, line := fun.FileLine(pc - 1)
+ stack[i].File = StripGOPATH(file)
+ stack[i].Line = line
+ stack[i].Name = StripPackage(fun.Name())
+ }
+ return stack
+}
+
+// CallersMulti returns a Multi which includes one Stack for the
+// current callers. The argument skip is the number of stack frames to ascend,
+// with 0 identifying the caller of CallersMulti.
+func CallersMulti(skip int) *Multi {
+ m := new(Multi)
+ m.AddCallers(skip + 1)
+ return m
+}
+
+func writeStack(b *bytes.Buffer, s Stack) {
+ var width int
+ for _, f := range s {
+ if l := len(f.File) + numDigits(f.Line) + 1; l > width {
+ width = l
+ }
+ }
+ last := len(s) - 1
+ for i, f := range s {
+ b.WriteString(f.File)
+ b.WriteRune(rune(':'))
+ n, _ := fmt.Fprintf(b, "%d", f.Line)
+ for i := width - len(f.File) - n; i != 0; i-- {
+ b.WriteRune(rune(' '))
+ }
+ b.WriteString(f.Name)
+ if i != last {
+ b.WriteRune(rune('\n'))
+ }
+ }
+}
+
+func numDigits(i int) int {
+ var n int
+ for {
+ n++
+ i = i / 10
+ if i == 0 {
+ return n
+ }
+ }
+}
+
+var (
+ // This can be set by a build script. It will be the colon separated equivalent
+ // of the environment variable.
+ gopath string
+
+ // This is the processed version based on either the above variable set by the
+ // build or from the GOPATH environment variable.
+ gopaths []string
+)
+
+func init() {
+ // prefer the variable set at build time, otherwise fallback to the
+ // environment variable.
+ if gopath == "" {
+ gopath = os.Getenv("GOPATH")
+ }
+ SetGOPATH(gopath)
+}
+
+// StripGOPATH strips the GOPATH prefix from the file path f.
+// In development, this will be done using the GOPATH environment variable.
+// For production builds, where the GOPATH environment will not be set, the
+// GOPATH can be included in the binary by passing ldflags, for example:
+//
+// GO_LDFLAGS="$GO_LDFLAGS -X github.com/facebookgo/stack.gopath $GOPATH"
+// go install "-ldflags=$GO_LDFLAGS" my/pkg
+func StripGOPATH(f string) string {
+ for _, p := range gopaths {
+ if strings.HasPrefix(f, p) {
+ return f[len(p):]
+ }
+ }
+ return f
+}
+
+// SetGOPATH configures the GOPATH to enable relative paths in stack traces.
+func SetGOPATH(gp string) {
+ gopath = gp
+ gopaths = nil
+
+ for _, p := range strings.Split(gopath, ":") {
+ if p != "" {
+ gopaths = append(gopaths, filepath.Join(p, "src")+"/")
+ }
+ }
+
+ // Also strip GOROOT for maximum cleanliness
+ gopaths = append(gopaths, filepath.Join(runtime.GOROOT(), "src", "pkg")+"/")
+}
+
+// StripPackage strips the package name from the given Func.Name.
+func StripPackage(n string) string {
+ slashI := strings.LastIndex(n, "/")
+ if slashI == -1 {
+ slashI = 0 // for built-in packages
+ }
+ dotI := strings.Index(n[slashI:], ".")
+ if dotI == -1 {
+ return n
+ }
+ return n[slashI+dotI+1:]
+}
diff --git a/vendor/github.com/facebookgo/subset/.travis.yml b/vendor/github.com/facebookgo/subset/.travis.yml
new file mode 100644
index 0000000..2cc62c5
--- /dev/null
+++ b/vendor/github.com/facebookgo/subset/.travis.yml
@@ -0,0 +1,24 @@
+language: go
+
+go:
+ - 1.2
+ - 1.3
+
+matrix:
+ fast_finish: true
+
+before_install:
+ - go get -v code.google.com/p/go.tools/cmd/vet
+ - go get -v github.com/golang/lint/golint
+ - go get -v code.google.com/p/go.tools/cmd/cover
+
+install:
+ - go install -race -v std
+ - go get -race -t -v ./...
+ - go install -race -v ./...
+
+script:
+ - go vet ./...
+ - $HOME/gopath/bin/golint .
+ - go test -cpu=2 -race -v ./...
+ - go test -cpu=2 -covermode=atomic ./...
diff --git a/vendor/github.com/facebookgo/subset/license b/vendor/github.com/facebookgo/subset/license
new file mode 100644
index 0000000..2e00ce9
--- /dev/null
+++ b/vendor/github.com/facebookgo/subset/license
@@ -0,0 +1,30 @@
+BSD License
+
+For subset software
+
+Copyright (c) 2015, Facebook, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ * Neither the name Facebook nor the names of its contributors may be used to
+ endorse or promote products derived from this software without specific
+ prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/facebookgo/subset/patents b/vendor/github.com/facebookgo/subset/patents
new file mode 100644
index 0000000..834749c
--- /dev/null
+++ b/vendor/github.com/facebookgo/subset/patents
@@ -0,0 +1,33 @@
+Additional Grant of Patent Rights Version 2
+
+"Software" means the subset software distributed by Facebook, Inc.
+
+Facebook, Inc. ("Facebook") hereby grants to each recipient of the Software
+("you") a perpetual, worldwide, royalty-free, non-exclusive, irrevocable
+(subject to the termination provision below) license under any Necessary
+Claims, to make, have made, use, sell, offer to sell, import, and otherwise
+transfer the Software. For avoidance of doubt, no license is granted under
+Facebook’s rights in any patent claims that are infringed by (i) modifications
+to the Software made by you or any third party or (ii) the Software in
+combination with any software or other technology.
+
+The license granted hereunder will terminate, automatically and without notice,
+if you (or any of your subsidiaries, corporate affiliates or agents) initiate
+directly or indirectly, or take a direct financial interest in, any Patent
+Assertion: (i) against Facebook or any of its subsidiaries or corporate
+affiliates, (ii) against any party if such Patent Assertion arises in whole or
+in part from any software, technology, product or service of Facebook or any of
+its subsidiaries or corporate affiliates, or (iii) against any party relating
+to the Software. Notwithstanding the foregoing, if Facebook or any of its
+subsidiaries or corporate affiliates files a lawsuit alleging patent
+infringement against you in the first instance, and you respond by filing a
+patent infringement counterclaim in that lawsuit against that party that is
+unrelated to the Software, the license granted hereunder will not terminate
+under section (i) of this paragraph due to such counterclaim.
+
+A "Necessary Claim" is a claim of a patent owned by Facebook that is
+necessarily infringed by the Software standing alone.
+
+A "Patent Assertion" is any lawsuit or other action alleging direct, indirect,
+or contributory infringement or inducement to infringe any patent, including a
+cross-claim or counterclaim.
diff --git a/vendor/github.com/facebookgo/subset/readme.md b/vendor/github.com/facebookgo/subset/readme.md
new file mode 100644
index 0000000..b95f6c8
--- /dev/null
+++ b/vendor/github.com/facebookgo/subset/readme.md
@@ -0,0 +1,5 @@
+subset [![Build Status](https://secure.travis-ci.org/facebookgo/subset.png)](http://travis-ci.org/facebookgo/subset)
+======
+
+Check if a value is a subset of another, based on reflect/deepequals.go.
+Documentation: http://godoc.org/github.com/facebookgo/subset
diff --git a/vendor/github.com/facebookgo/subset/subset.go b/vendor/github.com/facebookgo/subset/subset.go
new file mode 100644
index 0000000..dbc0a12
--- /dev/null
+++ b/vendor/github.com/facebookgo/subset/subset.go
@@ -0,0 +1,200 @@
+// Package subset implements logic to check if a value is a subset of
+// another using reflect.
+package subset
+
+import (
+ "reflect"
+)
+
+// During checkSubset, must keep track of checks that are
+// in progress. The comparison algorithm assumes that all
+// checks in progress are true when it reencounters them.
+// Visited comparisons are stored in a map indexed by 17 * a1 + a2;
+type visit struct {
+ a1 uintptr
+ a2 uintptr
+ typ reflect.Type
+ next *visit
+}
+
+// Fatalf is how our assertion will fail.
+type Fatalf interface {
+ Fatalf(format string, args ...interface{})
+}
+
+// Ideally we'd be able to use reflect.valueInterface(v, false) and
+// look at unexported fields, but for now we just ignore them.
+func safeInterface(v reflect.Value) (i interface{}) {
+ defer func() {
+ if err := recover(); err != nil {
+ // fmt.Println("Recovered safeInterface:", err)
+ i = nil
+ }
+ }()
+ return v.Interface()
+}
+
+// Tests for deep equality using reflected types. The map argument tracks
+// comparisons that have already been seen, which allows short circuiting on
+// recursive types.
+func checkSubset(expected, target reflect.Value, visited map[uintptr]*visit, depth int) (b bool) {
+ if !expected.IsValid() {
+ // fmt.Println("!expected.IsValid()")
+ return true
+ }
+ if !target.IsValid() {
+ // fmt.Println("!target.IsValid()")
+ return false
+ }
+ if expected.Type() != target.Type() {
+ // fmt.Println("Type() differs")
+ return false
+ }
+
+ // if depth > 10 { panic("checkSubset") } // for debugging
+
+ if expected.CanAddr() && target.CanAddr() {
+ addr1 := expected.UnsafeAddr()
+ addr2 := target.UnsafeAddr()
+ if addr1 > addr2 {
+ // Canonicalize order to reduce number of entries in visited.
+ addr1, addr2 = addr2, addr1
+ }
+
+ // Short circuit if references are identical ...
+ if addr1 == addr2 {
+ return true
+ }
+
+ // ... or already seen
+ h := 17*addr1 + addr2
+ seen := visited[h]
+ typ := expected.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.a1 == addr1 && p.a2 == addr2 && p.typ == typ {
+ return true
+ }
+ }
+
+ // Remember for later.
+ visited[h] = &visit{addr1, addr2, typ, seen}
+ }
+
+ switch expected.Kind() {
+ case reflect.Array:
+ // fmt.Println("Kind: Array")
+ if expected.Len() == 0 {
+ return true
+ }
+ if expected.Len() != target.Len() {
+ return false
+ }
+ for i := 0; i < expected.Len(); i++ {
+ if !checkSubset(expected.Index(i), target.Index(i), visited, depth+1) {
+ return false
+ }
+ }
+ return true
+ case reflect.Slice:
+ // fmt.Println("Kind: Slice")
+ if expected.IsNil() {
+ return true
+ }
+ if expected.IsNil() != target.IsNil() {
+ return false
+ }
+ if expected.Len() != target.Len() {
+ return false
+ }
+ for i := 0; i < expected.Len(); i++ {
+ if !checkSubset(expected.Index(i), target.Index(i), visited, depth+1) {
+ return false
+ }
+ }
+ return true
+ case reflect.Interface:
+ // fmt.Println("Kind: Interface")
+ if expected.IsNil() {
+ return true
+ }
+ if expected.IsNil() || target.IsNil() {
+ return expected.IsNil() == target.IsNil()
+ }
+ return checkSubset(expected.Elem(), target.Elem(), visited, depth+1)
+ case reflect.Ptr:
+ // fmt.Println("Kind: Ptr")
+ return checkSubset(expected.Elem(), target.Elem(), visited, depth+1)
+ case reflect.Struct:
+ // fmt.Println("Kind: Struct")
+ for i, n := 0, expected.NumField(); i < n; i++ {
+ if !checkSubset(expected.Field(i), target.Field(i), visited, depth+1) {
+ return false
+ }
+ }
+ return true
+ case reflect.Map:
+ // fmt.Println("Kind: Map")
+ if expected.IsNil() {
+ return true
+ }
+ if expected.IsNil() != target.IsNil() {
+ return false
+ }
+ for _, k := range expected.MapKeys() {
+ if !checkSubset(expected.MapIndex(k), target.MapIndex(k), visited, depth+1) {
+ return false
+ }
+ }
+ return true
+ case reflect.Func:
+ // fmt.Println("Kind: Func")
+ if expected.IsNil() && target.IsNil() {
+ return true
+ }
+ // Can't do better than this:
+ return false
+ default:
+ expectedInterface := safeInterface(expected)
+ if expectedInterface == nil {
+ return true
+ }
+ targetInterface := target.Interface() // expect this to be safe now
+ // fmt.Println("Kind: default", expectedInterface, targetInterface)
+ // ignore zero value expectations
+ zeroValue := reflect.Zero(expected.Type())
+ if reflect.DeepEqual(expectedInterface, zeroValue.Interface()) {
+ // fmt.Println("Expecting zero value")
+ return true
+ }
+
+ // Normal equality suffices
+ return reflect.DeepEqual(expectedInterface, targetInterface)
+ }
+}
+
+// Check tests for deep subset. It uses normal == equality where
+// possible but will scan members of arrays, slices, maps, and fields of
+// structs. It correctly handles recursive types. Functions are equal
+// only if they are both nil.
+func Check(expected, target interface{}) bool {
+ if expected == nil {
+ return true
+ }
+ if target == nil {
+ return false
+ }
+ return checkSubset(
+ reflect.ValueOf(expected),
+ reflect.ValueOf(target),
+ make(map[uintptr]*visit),
+ 0)
+}
+
+// Assert will fatal if not a subset with a useful message.
+// TODO should pretty print and show a colored side-by-side diff?
+func Assert(t Fatalf, expected interface{}, actual interface{}) {
+ if !Check(expected, actual) {
+ t.Fatalf("Did not find expected subset:\n%+v\nInstead found:\n%+v",
+ expected, actual)
+ }
+}
diff --git a/vendor/github.com/flosch/pongo2/.gitattributes b/vendor/github.com/flosch/pongo2/.gitattributes
new file mode 100644
index 0000000..fcadb2c
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/.gitattributes
@@ -0,0 +1 @@
+* text eol=lf
diff --git a/vendor/github.com/flosch/pongo2/.gitignore b/vendor/github.com/flosch/pongo2/.gitignore
new file mode 100644
index 0000000..89c56c0
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/.gitignore
@@ -0,0 +1,42 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+.idea
+.vscode
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+.project
+EBNF.txt
+test1.tpl
+pongo2_internal_test.go
+tpl-error.out
+/count.out
+/cover.out
+*.swp
+*.iml
+/cpu.out
+/mem.out
+/pongo2.test
+*.error
+/profile
+/coverage.out
+/pongo2_internal_test.ignore
+go.sum
diff --git a/vendor/github.com/flosch/pongo2/.travis.yml b/vendor/github.com/flosch/pongo2/.travis.yml
new file mode 100644
index 0000000..e39e5d0
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+os:
+ - linux
+ - osx
+go:
+ - 1.12
+script:
+ - go test -v
diff --git a/vendor/github.com/flosch/pongo2/AUTHORS b/vendor/github.com/flosch/pongo2/AUTHORS
new file mode 100644
index 0000000..601697c
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/AUTHORS
@@ -0,0 +1,11 @@
+Main author and maintainer of pongo2:
+
+* Florian Schlachter
+
+Contributors (in no specific order):
+
+* @romanoaugusto88
+* @vitalbh
+* @blaubaer
+
+Feel free to add yourself to the list or to modify your entry if you did a contribution.
diff --git a/vendor/github.com/flosch/pongo2/LICENSE b/vendor/github.com/flosch/pongo2/LICENSE
new file mode 100644
index 0000000..e876f86
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013-2014 Florian Schlachter
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/flosch/pongo2/README.md b/vendor/github.com/flosch/pongo2/README.md
new file mode 100644
index 0000000..f70f502
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/README.md
@@ -0,0 +1,273 @@
+# [pongo](https://en.wikipedia.org/wiki/Pongo_%28genus%29)2
+
+[![Join the chat at https://gitter.im/flosch/pongo2](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/flosch/pongo2)
+[![GoDoc](https://godoc.org/github.com/flosch/pongo2?status.svg)](https://godoc.org/github.com/flosch/pongo2)
+[![Build Status](https://travis-ci.org/flosch/pongo2.svg?branch=master)](https://travis-ci.org/flosch/pongo2)
+[![Backers on Open Collective](https://opencollective.com/pongo2/backers/badge.svg)](#backers)
+[![Sponsors on Open Collective](https://opencollective.com/pongo2/sponsors/badge.svg)](#sponsors)
+
+pongo2 is the successor of [pongo](https://github.com/flosch/pongo), a Django-syntax like templating-language.
+
+Install/update using `go get` (no dependencies required by pongo2):
+```
+go get -u github.com/flosch/pongo2
+```
+
+Please use the [issue tracker](https://github.com/flosch/pongo2/issues) if you're encountering any problems with pongo2 or if you need help with implementing tags or filters ([create a ticket!](https://github.com/flosch/pongo2/issues/new)).
+
+## First impression of a template
+
+```HTML+Django
+Our admins and users
+{# This is a short example to give you a quick overview of pongo2's syntax. #}
+
+{% macro user_details(user, is_admin=false) %}
+
+		<p>This user registered {{ user.register_date|naturaltime }}.</p>
+
+
+
+		<p>The user's biography:</p>
+
+		<p>{{ user.biography|markdown|truncatewords_html:15 }}
+			<a href="/user/{{ user.id }}/">read more</a></p>
+
+ {% if is_admin %}
+		<p class="admin">This user is an admin!</p>
+	{% endif %}
+
+{% endmacro %}
+
+
+
+
+
+	<h3>Our admins</h3>
+ {% for admin in adminlist %}
+ {{ user_details(admin, true) }}
+ {% endfor %}
+
+
+	<h3>Our members</h3>
+ {% for user in userlist %}
+ {{ user_details(user) }}
+ {% endfor %}
+
+
+```
+
+## Development status
+
+**Latest stable release**: v3.0 (`go get -u gopkg.in/flosch/pongo2.v3` / [`v3`](https://github.com/flosch/pongo2/tree/v3)-branch)
+
+**Current development**: v4 (`master`-branch)
+
+*Note*: With the release of pongo v4 the branch v2 will be deprecated.
+
+**Deprecated versions** (not supported anymore): v1
+
+| Topic | Status |
+| ------------------------------------ | -------------------------------------------------------------------------------------- |
+| Django version compatibility: | [1.7](https://docs.djangoproject.com/en/1.7/ref/templates/builtins/) |
+| *Missing* (planned) **filters**: | none ([hints](https://github.com/flosch/pongo2/blob/master/filters_builtin.go#L3)) |
+| *Missing* (planned) **tags**: | none ([hints](https://github.com/flosch/pongo2/blob/master/tags.go#L3)) |
+
+Please also have a look on the [caveats](https://github.com/flosch/pongo2#caveats) and on the [official add-ons](https://github.com/flosch/pongo2#official).
+
+## Features (and new in pongo2)
+
+ * Entirely rewritten from the ground-up.
+ * [Advanced C-like expressions](https://github.com/flosch/pongo2/blob/master/template_tests/expressions.tpl).
+ * [Complex function calls within expressions](https://github.com/flosch/pongo2/blob/master/template_tests/function_calls_wrapper.tpl).
+ * [Easy API to create new filters and tags](http://godoc.org/github.com/flosch/pongo2#RegisterFilter) ([including parsing arguments](http://godoc.org/github.com/flosch/pongo2#Parser))
+ * Additional features:
+ * Macros including importing macros from other files (see [template_tests/macro.tpl](https://github.com/flosch/pongo2/blob/master/template_tests/macro.tpl))
+ * [Template sandboxing](https://godoc.org/github.com/flosch/pongo2#TemplateSet) ([directory patterns](http://golang.org/pkg/path/filepath/#Match), banned tags/filters)
+
+## Recent API changes within pongo2
+
+If you're using the `master`-branch of pongo2, you might be interested in this section. Since pongo2 is still in development (even though there is a first stable release!), there could be (backwards-incompatible) API changes over time. To keep track of these and therefore make it painless for you to adapt your codebase, I'll list them here.
+
+ * Function signature for tag execution changed: not taking a `bytes.Buffer` anymore; instead `Execute()`-functions are now taking a `TemplateWriter` interface.
+ * Function signature for tag and filter parsing/execution changed (`error` return type changed to `*Error`).
+ * `INodeEvaluator` has been removed and got replaced by `IEvaluator`. You can change your existing tags/filters by simply replacing the interface.
+ * Two new helper functions: [`RenderTemplateFile()`](https://godoc.org/github.com/flosch/pongo2#RenderTemplateFile) and [`RenderTemplateString()`](https://godoc.org/github.com/flosch/pongo2#RenderTemplateString).
+ * `Template.ExecuteRW()` is now [`Template.ExecuteWriter()`](https://godoc.org/github.com/flosch/pongo2#Template.ExecuteWriter)
+ * `Template.Execute*()` functions do now take a `pongo2.Context` directly (no pointer anymore).
+
+## How you can help
+
+ * Write [filters](https://github.com/flosch/pongo2/blob/master/filters_builtin.go#L3) / [tags](https://github.com/flosch/pongo2/blob/master/tags.go#L3) by forking pongo2 and sending pull requests
+ * Write/improve code tests (use the following command to see what tests are missing: `go test -v -cover -covermode=count -coverprofile=cover.out && go tool cover -html=cover.out` or have a look on [gocover.io/github.com/flosch/pongo2](http://gocover.io/github.com/flosch/pongo2))
+ * Write/improve template tests (see the `template_tests/` directory)
+ * Write middleware, libraries and websites using pongo2. :-)
+
+# Documentation
+
+For a documentation on how the templating language works you can [head over to the Django documentation](https://docs.djangoproject.com/en/dev/topics/templates/). pongo2 aims to be compatible with it.
+
+You can access pongo2's API documentation on [godoc](https://godoc.org/github.com/flosch/pongo2).
+
+## Caveats
+
+### Filters
+
+ * **date** / **time**: The `date` and `time` filter are taking the Golang specific time- and date-format (not Django's one) currently. [Take a look on the format here](http://golang.org/pkg/time/#Time.Format).
+ * **stringformat**: `stringformat` does **not** take Python's string format syntax as a parameter, instead it takes Go's. Essentially `{{ 3.14|stringformat:"pi is %.2f" }}` is `fmt.Sprintf("pi is %.2f", 3.14)`.
+ * **escape** / **force_escape**: Unlike Django's behaviour, the `escape`-filter is applied immediately. Therefore there is no need for a `force_escape`-filter yet.
+
+### Tags
+
+ * **for**: All the `forloop` fields (like `forloop.counter`) are written with a capital letter at the beginning. For example, the `counter` can be accessed by `forloop.Counter` and the parentloop by `forloop.Parentloop`.
+ * **now**: takes Go's time format (see **date** and **time**-filter).
+
+### Misc
+
+ * **not in-operator**: You can check whether a map/struct/string contains a key/field/substring by using the in-operator (or the negation of it):
+ `{% if key in map %}Key is in map{% else %}Key not in map{% endif %}` or `{% if !(key in map) %}Key is NOT in map{% else %}Key is in map{% endif %}`.
+
+# Add-ons, libraries and helpers
+
+## Official
+
+ * [ponginae](https://github.com/flosch/ponginae) - A web-framework for Go (using pongo2).
+ * [pongo2-tools](https://github.com/flosch/pongo2-tools) - Official tools and helpers for pongo2
+ * [pongo2-addons](https://github.com/flosch/pongo2-addons) - Official additional filters/tags for pongo2 (for example a **markdown**-filter). They are in their own repository because they're relying on 3rd-party-libraries.
+
+## 3rd-party
+
+ * [beego-pongo2](https://github.com/oal/beego-pongo2) - A tiny little helper for using Pongo2 with [Beego](https://github.com/astaxie/beego).
+ * [beego-pongo2.v2](https://github.com/ipfans/beego-pongo2.v2) - Same as `beego-pongo2`, but for pongo2 v2.
+ * [macaron-pongo2](https://github.com/macaron-contrib/pongo2) - pongo2 support for [Macaron](https://github.com/Unknwon/macaron), a modular web framework.
+ * [ginpongo2](https://github.com/ngerakines/ginpongo2) - middleware for [gin](https://github.com/gin-gonic/gin) to use pongo2 templates
+ * [Build'n support for Iris' template engine](https://github.com/kataras/iris)
+ * [pongo2gin](https://gitlab.com/go-box/pongo2gin) - alternative renderer for [gin](https://github.com/gin-gonic/gin) to use pongo2 templates
+ * [pongo2-trans](https://github.com/digitalcrab/pongo2trans) - `trans`-tag implementation for internationalization
+ * [tpongo2](https://github.com/tango-contrib/tpongo2) - pongo2 support for [Tango](https://github.com/lunny/tango), a micro-kernel & pluggable web framework.
+ * [p2cli](https://github.com/wrouesnel/p2cli) - command line templating utility based on pongo2
+
+Please add your project to this list and send me a pull request when you've developed something nice for pongo2.
+
+# API-usage examples
+
+Please see the documentation for a full list of provided API methods.
+
+## A tiny example (template string)
+
+```Go
+// Compile the template first (i. e. creating the AST)
+tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!")
+if err != nil {
+ panic(err)
+}
+// Now you can render the template with the given
+// pongo2.Context how often you want to.
+out, err := tpl.Execute(pongo2.Context{"name": "florian"})
+if err != nil {
+ panic(err)
+}
+fmt.Println(out) // Output: Hello Florian!
+```
+
+## Example server-usage (template file)
+
+```Go
+package main
+
+import (
+ "github.com/flosch/pongo2"
+ "net/http"
+)
+
+// Pre-compiling the templates at application startup using the
+// little Must()-helper function (Must() will panic if FromFile()
+// or FromString() will return with an error - that's it).
+// It's faster to pre-compile it anywhere at startup and only
+// execute the template later.
+var tplExample = pongo2.Must(pongo2.FromFile("example.html"))
+
+func examplePage(w http.ResponseWriter, r *http.Request) {
+ // Execute the template per HTTP request
+ err := tplExample.ExecuteWriter(pongo2.Context{"query": r.FormValue("query")}, w)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+}
+
+func main() {
+ http.HandleFunc("/", examplePage)
+ http.ListenAndServe(":8080", nil)
+}
+```
+
+# Benchmark
+
+The benchmarks have been run on my machine (`Intel(R) Core(TM) i7-2600 CPU @ 3.40GHz`) using the command:
+
+ go test -bench . -cpu 1,2,4,8
+
+All benchmarks are compiling (depends on the benchmark) and executing the `template_tests/complex.tpl` template.
+
+The results are:
+
+ BenchmarkExecuteComplexWithSandboxActive 50000 60450 ns/op
+ BenchmarkExecuteComplexWithSandboxActive-2 50000 56998 ns/op
+ BenchmarkExecuteComplexWithSandboxActive-4 50000 60343 ns/op
+ BenchmarkExecuteComplexWithSandboxActive-8 50000 64229 ns/op
+ BenchmarkCompileAndExecuteComplexWithSandboxActive 10000 164410 ns/op
+ BenchmarkCompileAndExecuteComplexWithSandboxActive-2 10000 156682 ns/op
+ BenchmarkCompileAndExecuteComplexWithSandboxActive-4 10000 164821 ns/op
+ BenchmarkCompileAndExecuteComplexWithSandboxActive-8 10000 171806 ns/op
+ BenchmarkParallelExecuteComplexWithSandboxActive 50000 60428 ns/op
+ BenchmarkParallelExecuteComplexWithSandboxActive-2 50000 31887 ns/op
+ BenchmarkParallelExecuteComplexWithSandboxActive-4 100000 22810 ns/op
+ BenchmarkParallelExecuteComplexWithSandboxActive-8 100000 18820 ns/op
+ BenchmarkExecuteComplexWithoutSandbox 50000 56942 ns/op
+ BenchmarkExecuteComplexWithoutSandbox-2 50000 56168 ns/op
+ BenchmarkExecuteComplexWithoutSandbox-4 50000 57838 ns/op
+ BenchmarkExecuteComplexWithoutSandbox-8 50000 60539 ns/op
+ BenchmarkCompileAndExecuteComplexWithoutSandbox 10000 162086 ns/op
+ BenchmarkCompileAndExecuteComplexWithoutSandbox-2 10000 159771 ns/op
+ BenchmarkCompileAndExecuteComplexWithoutSandbox-4 10000 163826 ns/op
+ BenchmarkCompileAndExecuteComplexWithoutSandbox-8 10000 169062 ns/op
+ BenchmarkParallelExecuteComplexWithoutSandbox 50000 57152 ns/op
+ BenchmarkParallelExecuteComplexWithoutSandbox-2 50000 30276 ns/op
+ BenchmarkParallelExecuteComplexWithoutSandbox-4 100000 22065 ns/op
+ BenchmarkParallelExecuteComplexWithoutSandbox-8 100000 18034 ns/op
+
+Benchmarked on October 2nd 2014.
+
+## Contributors
+
+This project exists thanks to all the people who contribute.
+
+
+
+## Backers
+
+Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/pongo2#backer)]
+
+
+
+
+## Sponsors
+
+Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/pongo2#sponsor)]
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/vendor/github.com/flosch/pongo2/context.go b/vendor/github.com/flosch/pongo2/context.go
new file mode 100644
index 0000000..2934d70
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/context.go
@@ -0,0 +1,136 @@
+package pongo2
+
+import (
+ "regexp"
+
+ "github.com/juju/errors"
+)
+
+var reIdentifiers = regexp.MustCompile("^[a-zA-Z0-9_]+$")
+
+var autoescape = true
+
+func SetAutoescape(newValue bool) {
+ autoescape = newValue
+}
+
+// A Context type provides constants, variables, instances or functions to a template.
+//
+// pongo2 automatically provides meta-information or functions through the "pongo2"-key.
+// Currently, context["pongo2"] contains the following keys:
+// 1. version: returns the version string
+//
+// Template examples for accessing items from your context:
+// {{ myconstant }}
+// {{ myfunc("test", 42) }}
+// {{ user.name }}
+// {{ pongo2.version }}
+type Context map[string]interface{}
+
+func (c Context) checkForValidIdentifiers() *Error {
+ for k, v := range c {
+ if !reIdentifiers.MatchString(k) {
+ return &Error{
+ Sender: "checkForValidIdentifiers",
+ OrigError: errors.Errorf("context-key '%s' (value: '%+v') is not a valid identifier", k, v),
+ }
+ }
+ }
+ return nil
+}
+
+// Update updates this context with the key/value-pairs from another context.
+func (c Context) Update(other Context) Context {
+ for k, v := range other {
+ c[k] = v
+ }
+ return c
+}
+
+// ExecutionContext contains all data important for the current rendering state.
+//
+// If you're writing a custom tag, your tag's Execute()-function will
+// have access to the ExecutionContext. This struct stores anything
+// about the current rendering process's Context including
+// the Context provided by the user (field Public).
+// You can safely use the Private context to provide data to the user's
+// template (like a 'forloop'-information). The Shared-context is used
+// to share data between tags. All ExecutionContexts share this context.
+//
+// Please be careful when accessing the Public data.
+// PLEASE DO NOT MODIFY THE PUBLIC CONTEXT (read-only).
+//
+// To create your own execution context within tags, use the
+// NewChildExecutionContext(parent) function.
+type ExecutionContext struct {
+ template *Template
+
+ Autoescape bool
+ Public Context
+ Private Context
+ Shared Context
+}
+
+var pongo2MetaContext = Context{
+ "version": Version,
+}
+
+func newExecutionContext(tpl *Template, ctx Context) *ExecutionContext {
+ privateCtx := make(Context)
+
+ // Make the pongo2-related funcs/vars available to the context
+ privateCtx["pongo2"] = pongo2MetaContext
+
+ return &ExecutionContext{
+ template: tpl,
+
+ Public: ctx,
+ Private: privateCtx,
+ Autoescape: autoescape,
+ }
+}
+
+func NewChildExecutionContext(parent *ExecutionContext) *ExecutionContext {
+ newctx := &ExecutionContext{
+ template: parent.template,
+
+ Public: parent.Public,
+ Private: make(Context),
+ Autoescape: parent.Autoescape,
+ }
+ newctx.Shared = parent.Shared
+
+ // Copy all existing private items
+ newctx.Private.Update(parent.Private)
+
+ return newctx
+}
+
+func (ctx *ExecutionContext) Error(msg string, token *Token) *Error {
+ return ctx.OrigError(errors.New(msg), token)
+}
+
+func (ctx *ExecutionContext) OrigError(err error, token *Token) *Error {
+ filename := ctx.template.name
+ var line, col int
+ if token != nil {
+ // No tokens available
+ // TODO: Add location (from where?)
+ filename = token.Filename
+ line = token.Line
+ col = token.Col
+ }
+ return &Error{
+ Template: ctx.template,
+ Filename: filename,
+ Line: line,
+ Column: col,
+ Token: token,
+ Sender: "execution",
+ OrigError: err,
+ }
+}
+
+func (ctx *ExecutionContext) Logf(format string, args ...interface{}) {
+ ctx.template.set.logf(format, args...)
+}
diff --git a/vendor/github.com/flosch/pongo2/doc.go b/vendor/github.com/flosch/pongo2/doc.go
new file mode 100644
index 0000000..5a23e2b
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/doc.go
@@ -0,0 +1,31 @@
+// A Django-syntax like template-engine
+//
+// Blog posts about pongo2 (including introduction and migration):
+// https://www.florian-schlachter.de/?tag=pongo2
+//
+// Complete documentation on the template language:
+// https://docs.djangoproject.com/en/dev/topics/templates/
+//
+// Try out pongo2 live in the pongo2 playground:
+// https://www.florian-schlachter.de/pongo2/
+//
+// Make sure to read README.md in the repository as well.
+//
+// A tiny example with template strings:
+//
+// (Snippet on playground: https://www.florian-schlachter.de/pongo2/?id=1206546277)
+//
+// // Compile the template first (i. e. creating the AST)
+// tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!")
+// if err != nil {
+// panic(err)
+// }
+// // Now you can render the template with the given
+// // pongo2.Context how often you want to.
+// out, err := tpl.Execute(pongo2.Context{"name": "fred"})
+// if err != nil {
+// panic(err)
+// }
+// fmt.Println(out) // Output: Hello Fred!
+//
+package pongo2
diff --git a/vendor/github.com/flosch/pongo2/error.go b/vendor/github.com/flosch/pongo2/error.go
new file mode 100644
index 0000000..8aec8c1
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/error.go
@@ -0,0 +1,91 @@
+package pongo2
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+)
+
+// The Error type is being used to address an error during lexing, parsing or
+// execution. If you want to return an error object (for example in your own
+// tag or filter) fill this object with as much information as you have.
+// Make sure "Sender" is always given (if you're returning an error within
+// a filter, make Sender equals 'filter:yourfilter'; same goes for tags: 'tag:mytag').
+// It's okay if you only fill in ErrorMsg if you don't have any other details at hand.
+type Error struct {
+ Template *Template
+ Filename string
+ Line int
+ Column int
+ Token *Token
+ Sender string
+ OrigError error
+}
+
+func (e *Error) updateFromTokenIfNeeded(template *Template, t *Token) *Error {
+ if e.Template == nil {
+ e.Template = template
+ }
+
+ if e.Token == nil {
+ e.Token = t
+ if e.Line <= 0 {
+ e.Line = t.Line
+ e.Column = t.Col
+ }
+ }
+
+ return e
+}
+
+// Returns a nice formatted error string.
+func (e *Error) Error() string {
+ s := "[Error"
+ if e.Sender != "" {
+ s += " (where: " + e.Sender + ")"
+ }
+ if e.Filename != "" {
+ s += " in " + e.Filename
+ }
+ if e.Line > 0 {
+ s += fmt.Sprintf(" | Line %d Col %d", e.Line, e.Column)
+ if e.Token != nil {
+ s += fmt.Sprintf(" near '%s'", e.Token.Val)
+ }
+ }
+ s += "] "
+ s += e.OrigError.Error()
+ return s
+}
+
+// RawLine returns the affected line from the original template, if available.
+func (e *Error) RawLine() (line string, available bool, outErr error) {
+ if e.Line <= 0 || e.Filename == "" {
+ return "", false, nil
+ }
+
+ filename := e.Filename
+ if e.Template != nil {
+ filename = e.Template.set.resolveFilename(e.Template, e.Filename)
+ }
+ file, err := os.Open(filename)
+ if err != nil {
+ return "", false, err
+ }
+ defer func() {
+ err := file.Close()
+ if err != nil && outErr == nil {
+ outErr = err
+ }
+ }()
+
+ scanner := bufio.NewScanner(file)
+ l := 0
+ for scanner.Scan() {
+ l++
+ if l == e.Line {
+ return scanner.Text(), true, nil
+ }
+ }
+ return "", false, nil
+}
diff --git a/vendor/github.com/flosch/pongo2/filters.go b/vendor/github.com/flosch/pongo2/filters.go
new file mode 100644
index 0000000..1092705
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/filters.go
@@ -0,0 +1,143 @@
+package pongo2
+
+import (
+ "fmt"
+
+ "github.com/juju/errors"
+)
+
+// FilterFunction is the type filter functions must fulfil
+type FilterFunction func(in *Value, param *Value) (out *Value, err *Error)
+
+var filters map[string]FilterFunction
+
+func init() {
+ filters = make(map[string]FilterFunction)
+}
+
+// FilterExists returns true if the given filter is already registered
+func FilterExists(name string) bool {
+ _, existing := filters[name]
+ return existing
+}
+
+// RegisterFilter registers a new filter. If there's already a filter with the same
+// name, RegisterFilter will panic. You usually want to call this
+// function in the filter's init() function:
+// http://golang.org/doc/effective_go.html#init
+//
+// See http://www.florian-schlachter.de/post/pongo2/ for more about
+// writing filters and tags.
+func RegisterFilter(name string, fn FilterFunction) error {
+ if FilterExists(name) {
+ return errors.Errorf("filter with name '%s' is already registered", name)
+ }
+ filters[name] = fn
+ return nil
+}
+
+// ReplaceFilter replaces an already registered filter with a new implementation. Use this
+// function with caution since it allows you to change existing filter behaviour.
+func ReplaceFilter(name string, fn FilterFunction) error {
+ if !FilterExists(name) {
+ return errors.Errorf("filter with name '%s' does not exist (therefore cannot be overridden)", name)
+ }
+ filters[name] = fn
+ return nil
+}
+
+// MustApplyFilter behaves like ApplyFilter, but panics on an error.
+func MustApplyFilter(name string, value *Value, param *Value) *Value {
+ val, err := ApplyFilter(name, value, param)
+ if err != nil {
+ panic(err)
+ }
+ return val
+}
+
+// ApplyFilter applies a filter to a given value using the given parameters.
+// Returns a *pongo2.Value or an error.
+func ApplyFilter(name string, value *Value, param *Value) (*Value, *Error) {
+ fn, existing := filters[name]
+ if !existing {
+ return nil, &Error{
+ Sender: "applyfilter",
+ OrigError: errors.Errorf("Filter with name '%s' not found.", name),
+ }
+ }
+
+ // Make sure param is a *Value
+ if param == nil {
+ param = AsValue(nil)
+ }
+
+ return fn(value, param)
+}
+
+type filterCall struct {
+ token *Token
+
+ name string
+ parameter IEvaluator
+
+ filterFunc FilterFunction
+}
+
+func (fc *filterCall) Execute(v *Value, ctx *ExecutionContext) (*Value, *Error) {
+ var param *Value
+ var err *Error
+
+ if fc.parameter != nil {
+ param, err = fc.parameter.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ param = AsValue(nil)
+ }
+
+ filteredValue, err := fc.filterFunc(v, param)
+ if err != nil {
+ return nil, err.updateFromTokenIfNeeded(ctx.template, fc.token)
+ }
+ return filteredValue, nil
+}
+
+// Filter = IDENT | IDENT ":" FilterArg | IDENT "|" Filter
+func (p *Parser) parseFilter() (*filterCall, *Error) {
+ identToken := p.MatchType(TokenIdentifier)
+
+ // Check filter ident
+ if identToken == nil {
+ return nil, p.Error("Filter name must be an identifier.", nil)
+ }
+
+ filter := &filterCall{
+ token: identToken,
+ name: identToken.Val,
+ }
+
+ // Get the appropriate filter function and bind it
+ filterFn, exists := filters[identToken.Val]
+ if !exists {
+ return nil, p.Error(fmt.Sprintf("Filter '%s' does not exist.", identToken.Val), identToken)
+ }
+
+ filter.filterFunc = filterFn
+
+ // Check for filter-argument (2 tokens needed: ':' ARG)
+ if p.Match(TokenSymbol, ":") != nil {
+ if p.Peek(TokenSymbol, "}}") != nil {
+ return nil, p.Error("Filter parameter required after ':'.", nil)
+ }
+
+ // Get filter argument expression
+ v, err := p.parseVariableOrLiteral()
+ if err != nil {
+ return nil, err
+ }
+ filter.parameter = v
+ }
+
+ return filter, nil
+}
diff --git a/vendor/github.com/flosch/pongo2/filters_builtin.go b/vendor/github.com/flosch/pongo2/filters_builtin.go
new file mode 100644
index 0000000..f02b491
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/filters_builtin.go
@@ -0,0 +1,927 @@
+package pongo2
+
+/* Filters that are provided through github.com/flosch/pongo2-addons:
+ ------------------------------------------------------------------
+
+ filesizeformat
+ slugify
+ timesince
+ timeuntil
+
+ Filters that won't be added:
+ ----------------------------
+
+ get_static_prefix (reason: web-framework specific)
+ pprint (reason: python-specific)
+ static (reason: web-framework specific)
+
+ Reconsideration (not implemented yet):
+ --------------------------------------
+
+ force_escape (reason: not yet needed since this is the behaviour of pongo2's escape filter)
+ safeseq (reason: same reason as `force_escape`)
+ unordered_list (python-specific; not sure whether needed or not)
+ dictsort (python-specific; maybe one could add a filter to sort a list of structs by a specific field name)
+ dictsortreversed (see dictsort)
+*/
+
+import (
+ "bytes"
+ "fmt"
+ "math/rand"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/juju/errors"
+)
+
+func init() {
+ rand.Seed(time.Now().Unix())
+
+ RegisterFilter("escape", filterEscape)
+ RegisterFilter("safe", filterSafe)
+ RegisterFilter("escapejs", filterEscapejs)
+
+ RegisterFilter("add", filterAdd)
+ RegisterFilter("addslashes", filterAddslashes)
+ RegisterFilter("capfirst", filterCapfirst)
+ RegisterFilter("center", filterCenter)
+ RegisterFilter("cut", filterCut)
+ RegisterFilter("date", filterDate)
+ RegisterFilter("default", filterDefault)
+ RegisterFilter("default_if_none", filterDefaultIfNone)
+ RegisterFilter("divisibleby", filterDivisibleby)
+ RegisterFilter("first", filterFirst)
+ RegisterFilter("floatformat", filterFloatformat)
+ RegisterFilter("get_digit", filterGetdigit)
+ RegisterFilter("iriencode", filterIriencode)
+ RegisterFilter("join", filterJoin)
+ RegisterFilter("last", filterLast)
+ RegisterFilter("length", filterLength)
+ RegisterFilter("length_is", filterLengthis)
+ RegisterFilter("linebreaks", filterLinebreaks)
+ RegisterFilter("linebreaksbr", filterLinebreaksbr)
+ RegisterFilter("linenumbers", filterLinenumbers)
+ RegisterFilter("ljust", filterLjust)
+ RegisterFilter("lower", filterLower)
+ RegisterFilter("make_list", filterMakelist)
+ RegisterFilter("phone2numeric", filterPhone2numeric)
+ RegisterFilter("pluralize", filterPluralize)
+ RegisterFilter("random", filterRandom)
+ RegisterFilter("removetags", filterRemovetags)
+ RegisterFilter("rjust", filterRjust)
+ RegisterFilter("slice", filterSlice)
+ RegisterFilter("split", filterSplit)
+ RegisterFilter("stringformat", filterStringformat)
+ RegisterFilter("striptags", filterStriptags)
+ RegisterFilter("time", filterDate) // time uses filterDate (same golang-format)
+ RegisterFilter("title", filterTitle)
+ RegisterFilter("truncatechars", filterTruncatechars)
+ RegisterFilter("truncatechars_html", filterTruncatecharsHTML)
+ RegisterFilter("truncatewords", filterTruncatewords)
+ RegisterFilter("truncatewords_html", filterTruncatewordsHTML)
+ RegisterFilter("upper", filterUpper)
+ RegisterFilter("urlencode", filterUrlencode)
+ RegisterFilter("urlize", filterUrlize)
+ RegisterFilter("urlizetrunc", filterUrlizetrunc)
+ RegisterFilter("wordcount", filterWordcount)
+ RegisterFilter("wordwrap", filterWordwrap)
+ RegisterFilter("yesno", filterYesno)
+
+ RegisterFilter("float", filterFloat) // pongo-specific
+ RegisterFilter("integer", filterInteger) // pongo-specific
+}
+
+func filterTruncatecharsHelper(s string, newLen int) string {
+ runes := []rune(s)
+ if newLen < len(runes) {
+ if newLen >= 3 {
+ return fmt.Sprintf("%s...", string(runes[:newLen-3]))
+ }
+ // Not enough space for the ellipsis
+ return string(runes[:newLen])
+ }
+ return string(runes)
+}
+
+func filterTruncateHTMLHelper(value string, newOutput *bytes.Buffer, cond func() bool, fn func(c rune, s int, idx int) int, finalize func()) {
+ vLen := len(value)
+ var tagStack []string
+ idx := 0
+
+ for idx < vLen && !cond() {
+ c, s := utf8.DecodeRuneInString(value[idx:])
+ if c == utf8.RuneError {
+ idx += s
+ continue
+ }
+
+ if c == '<' {
+ newOutput.WriteRune(c)
+ idx += s // consume "<"
+
+ if idx+1 < vLen {
+ if value[idx] == '/' {
+ // Close tag
+
+ newOutput.WriteString("/")
+
+ tag := ""
+ idx++ // consume "/"
+
+ for idx < vLen {
+ c2, size2 := utf8.DecodeRuneInString(value[idx:])
+ if c2 == utf8.RuneError {
+ idx += size2
+ continue
+ }
+
+ // End of tag found
+ if c2 == '>' {
+ idx++ // consume ">"
+ break
+ }
+ tag += string(c2)
+ idx += size2
+ }
+
+ if len(tagStack) > 0 {
+ // Ideally, the close tag is TOP of tag stack
+ // In malformed HTML, it must not be, so iterate through the stack and remove the tag
+ for i := len(tagStack) - 1; i >= 0; i-- {
+ if tagStack[i] == tag {
+ // Found the tag
+ tagStack[i] = tagStack[len(tagStack)-1]
+ tagStack = tagStack[:len(tagStack)-1]
+ break
+ }
+ }
+ }
+
+ newOutput.WriteString(tag)
+ newOutput.WriteString(">")
+ } else {
+ // Open tag
+
+ tag := ""
+
+ params := false
+ for idx < vLen {
+ c2, size2 := utf8.DecodeRuneInString(value[idx:])
+ if c2 == utf8.RuneError {
+ idx += size2
+ continue
+ }
+
+ newOutput.WriteRune(c2)
+
+ // End of tag found
+ if c2 == '>' {
+ idx++ // consume ">"
+ break
+ }
+
+ if !params {
+ if c2 == ' ' {
+ params = true
+ } else {
+ tag += string(c2)
+ }
+ }
+
+ idx += size2
+ }
+
+ // Add tag to stack
+ tagStack = append(tagStack, tag)
+ }
+ }
+ } else {
+ idx = fn(c, s, idx)
+ }
+ }
+
+ finalize()
+
+ for i := len(tagStack) - 1; i >= 0; i-- {
+ tag := tagStack[i]
+ // Close everything from the regular tag stack
+		newOutput.WriteString(fmt.Sprintf("</%s>", tag))
+ }
+}
+
+func filterTruncatechars(in *Value, param *Value) (*Value, *Error) {
+ s := in.String()
+ newLen := param.Integer()
+ return AsValue(filterTruncatecharsHelper(s, newLen)), nil
+}
+
+func filterTruncatecharsHTML(in *Value, param *Value) (*Value, *Error) {
+ value := in.String()
+ newLen := max(param.Integer()-3, 0)
+
+ newOutput := bytes.NewBuffer(nil)
+
+ textcounter := 0
+
+ filterTruncateHTMLHelper(value, newOutput, func() bool {
+ return textcounter >= newLen
+ }, func(c rune, s int, idx int) int {
+ textcounter++
+ newOutput.WriteRune(c)
+
+ return idx + s
+ }, func() {
+ if textcounter >= newLen && textcounter < len(value) {
+ newOutput.WriteString("...")
+ }
+ })
+
+ return AsSafeValue(newOutput.String()), nil
+}
+
+func filterTruncatewords(in *Value, param *Value) (*Value, *Error) {
+ words := strings.Fields(in.String())
+ n := param.Integer()
+ if n <= 0 {
+ return AsValue(""), nil
+ }
+ nlen := min(len(words), n)
+ out := make([]string, 0, nlen)
+ for i := 0; i < nlen; i++ {
+ out = append(out, words[i])
+ }
+
+ if n < len(words) {
+ out = append(out, "...")
+ }
+
+ return AsValue(strings.Join(out, " ")), nil
+}
+
+func filterTruncatewordsHTML(in *Value, param *Value) (*Value, *Error) {
+ value := in.String()
+ newLen := max(param.Integer(), 0)
+
+ newOutput := bytes.NewBuffer(nil)
+
+ wordcounter := 0
+
+ filterTruncateHTMLHelper(value, newOutput, func() bool {
+ return wordcounter >= newLen
+ }, func(_ rune, _ int, idx int) int {
+ // Get next word
+ wordFound := false
+
+ for idx < len(value) {
+ c2, size2 := utf8.DecodeRuneInString(value[idx:])
+ if c2 == utf8.RuneError {
+ idx += size2
+ continue
+ }
+
+ if c2 == '<' {
+ // HTML tag start, don't consume it
+ return idx
+ }
+
+ newOutput.WriteRune(c2)
+ idx += size2
+
+ if c2 == ' ' || c2 == '.' || c2 == ',' || c2 == ';' {
+ // Word ends here, stop capturing it now
+ break
+ } else {
+ wordFound = true
+ }
+ }
+
+ if wordFound {
+ wordcounter++
+ }
+
+ return idx
+ }, func() {
+ if wordcounter >= newLen {
+ newOutput.WriteString("...")
+ }
+ })
+
+ return AsSafeValue(newOutput.String()), nil
+}
+
+func filterEscape(in *Value, param *Value) (*Value, *Error) {
+	output := strings.Replace(in.String(), "&", "&amp;", -1)
+	output = strings.Replace(output, ">", "&gt;", -1)
+	output = strings.Replace(output, "<", "&lt;", -1)
+	output = strings.Replace(output, "\"", "&quot;", -1)
+	output = strings.Replace(output, "'", "&#39;", -1)
+ return AsValue(output), nil
+}
+
// filterSafe marks the value as safe: it returns the input unchanged and
// exists only so the filter machinery can track that "safe" was applied
// (which suppresses autoescaping downstream).
func filterSafe(in *Value, param *Value) (*Value, *Error) {
	return in, nil // nothing to do here, just to keep track of the safe application
}
+
// filterEscapejs escapes the input for safe embedding inside a JavaScript
// string literal: ASCII letters, spaces and '/' pass through unchanged,
// every other rune is emitted as a \u00XX escape. The two-character
// textual sequences "\r" and "\n" are normalized to their \uXXXX form.
func filterEscapejs(in *Value, param *Value) (*Value, *Error) {
	sin := in.String()

	var b bytes.Buffer

	idx := 0
	for idx < len(sin) {
		c, size := utf8.DecodeRuneInString(sin[idx:])
		if c == utf8.RuneError {
			// Skip invalid UTF-8 bytes.
			idx += size
			continue
		}

		if c == '\\' {
			// Escape seq?
			if idx+1 < len(sin) {
				switch sin[idx+1] {
				case 'r':
					b.WriteString(fmt.Sprintf(`\u%04X`, '\r'))
					idx += 2
					continue
				case 'n':
					b.WriteString(fmt.Sprintf(`\u%04X`, '\n'))
					idx += 2
					continue
					/*case '\'':
						b.WriteString(fmt.Sprintf(`\u%04X`, '\''))
						idx += 2
						continue
					case '"':
						b.WriteString(fmt.Sprintf(`\u%04X`, '"'))
						idx += 2
						continue*/
				}
			}
		}

		if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == ' ' || c == '/' {
			b.WriteRune(c)
		} else {
			// Everything else (digits, punctuation, non-ASCII) is escaped.
			b.WriteString(fmt.Sprintf(`\u%04X`, c))
		}

		idx += size
	}

	return AsValue(b.String()), nil
}
+
+func filterAdd(in *Value, param *Value) (*Value, *Error) {
+ if in.IsNumber() && param.IsNumber() {
+ if in.IsFloat() || param.IsFloat() {
+ return AsValue(in.Float() + param.Float()), nil
+ }
+ return AsValue(in.Integer() + param.Integer()), nil
+ }
+ // If in/param is not a number, we're relying on the
+ // Value's String() conversion and just add them both together
+ return AsValue(in.String() + param.String()), nil
+}
+
+func filterAddslashes(in *Value, param *Value) (*Value, *Error) {
+ output := strings.Replace(in.String(), "\\", "\\\\", -1)
+ output = strings.Replace(output, "\"", "\\\"", -1)
+ output = strings.Replace(output, "'", "\\'", -1)
+ return AsValue(output), nil
+}
+
// filterCut removes every occurrence of the parameter string from the input.
func filterCut(in *Value, param *Value) (*Value, *Error) {
	return AsValue(strings.Replace(in.String(), param.String(), "", -1)), nil
}
+
// filterLength returns the length of the input value (string, slice, …)
// as reported by Value.Len.
func filterLength(in *Value, param *Value) (*Value, *Error) {
	return AsValue(in.Len()), nil
}
+
// filterLengthis reports whether the input's length equals the parameter.
func filterLengthis(in *Value, param *Value) (*Value, *Error) {
	return AsValue(in.Len() == param.Integer()), nil
}
+
// filterDefault returns the parameter when the input is falsy
// (per Value.IsTrue), otherwise the input itself.
func filterDefault(in *Value, param *Value) (*Value, *Error) {
	if !in.IsTrue() {
		return param, nil
	}
	return in, nil
}
+
// filterDefaultIfNone returns the parameter only when the input is nil;
// unlike filterDefault, other falsy values (0, "", false) pass through.
func filterDefaultIfNone(in *Value, param *Value) (*Value, *Error) {
	if in.IsNil() {
		return param, nil
	}
	return in, nil
}
+
// filterDivisibleby reports whether the input is evenly divisible by the
// parameter; a zero divisor yields false instead of panicking.
func filterDivisibleby(in *Value, param *Value) (*Value, *Error) {
	if param.Integer() == 0 {
		return AsValue(false), nil
	}
	return AsValue(in.Integer()%param.Integer() == 0), nil
}
+
// filterFirst returns the first element of a non-empty sliceable value,
// or the empty string otherwise.
func filterFirst(in *Value, param *Value) (*Value, *Error) {
	if in.CanSlice() && in.Len() > 0 {
		return in.Index(0), nil
	}
	return AsValue(""), nil
}
+
// filterFloatformat formats the input as a float with the given number of
// decimals (Django "floatformat" semantics). With no argument or a
// negative/zero argument, whole numbers are rendered without a fractional
// part ("trimmed") and |n| decimals are used otherwise.
func filterFloatformat(in *Value, param *Value) (*Value, *Error) {
	val := in.Float()

	// -1 is the "no argument" default; it becomes 1 decimal below.
	decimals := -1
	if !param.IsNil() {
		// Any argument provided?
		decimals = param.Integer()
	}

	// if the argument is not a number (e. g. empty), the default
	// behaviour is trim the result
	trim := !param.IsNumber()

	if decimals <= 0 {
		// argument is negative or zero, so we
		// want the output being trimmed
		decimals = -decimals
		trim = true
	}

	if trim {
		// Remove zeroes
		if float64(int(val)) == val {
			// Whole number: render as an integer.
			return AsValue(in.Integer()), nil
		}
	}

	return AsValue(strconv.FormatFloat(val, 'f', decimals, 64)), nil
}
+
// filterGetdigit returns the param-th digit of the input counted from the
// right (1 = last digit). Non-positive or out-of-range indices return the
// input unchanged.
func filterGetdigit(in *Value, param *Value) (*Value, *Error) {
	i := param.Integer()
	l := len(in.String()) // do NOT use in.Len() here!
	if i <= 0 || i > l {
		return in, nil
	}
	// Subtracting 48 ('0') converts an ASCII digit byte to its numeric
	// value. NOTE(review): assumes the input consists of ASCII digits.
	return AsValue(in.String()[l-i] - 48), nil
}
+
+const filterIRIChars = "/#%[]=:;$&()+,!?*@'~"
+
// filterIriencode percent-encodes the input for use in an IRI, leaving
// the reserved characters in filterIRIChars untouched (Django
// "iriencode" semantics).
func filterIriencode(in *Value, param *Value) (*Value, *Error) {
	var b bytes.Buffer

	sin := in.String()
	for _, r := range sin {
		if strings.IndexRune(filterIRIChars, r) >= 0 {
			// Reserved character: keep as-is.
			b.WriteRune(r)
		} else {
			b.WriteString(url.QueryEscape(string(r)))
		}
	}

	return AsValue(b.String()), nil
}
+
+func filterJoin(in *Value, param *Value) (*Value, *Error) {
+ if !in.CanSlice() {
+ return in, nil
+ }
+ sep := param.String()
+ sl := make([]string, 0, in.Len())
+ for i := 0; i < in.Len(); i++ {
+ sl = append(sl, in.Index(i).String())
+ }
+ return AsValue(strings.Join(sl, sep)), nil
+}
+
// filterLast returns the last element of a non-empty sliceable value,
// or the empty string otherwise.
func filterLast(in *Value, param *Value) (*Value, *Error) {
	if in.CanSlice() && in.Len() > 0 {
		return in.Index(in.Len() - 1), nil
	}
	return AsValue(""), nil
}
+
// filterUpper upper-cases the input string.
func filterUpper(in *Value, param *Value) (*Value, *Error) {
	return AsValue(strings.ToUpper(in.String())), nil
}
+
// filterLower lower-cases the input string.
func filterLower(in *Value, param *Value) (*Value, *Error) {
	return AsValue(strings.ToLower(in.String())), nil
}
+
+func filterMakelist(in *Value, param *Value) (*Value, *Error) {
+ s := in.String()
+ result := make([]string, 0, len(s))
+ for _, c := range s {
+ result = append(result, string(c))
+ }
+ return AsValue(result), nil
+}
+
// filterCapfirst upper-cases the first rune of the input, leaving the
// rest untouched; empty input yields "".
func filterCapfirst(in *Value, param *Value) (*Value, *Error) {
	if in.Len() <= 0 {
		return AsValue(""), nil
	}
	t := in.String()
	// Decode the first rune so multi-byte characters are handled correctly.
	r, size := utf8.DecodeRuneInString(t)
	return AsValue(strings.ToUpper(string(r)) + t[size:]), nil
}
+
+func filterCenter(in *Value, param *Value) (*Value, *Error) {
+ width := param.Integer()
+ slen := in.Len()
+ if width <= slen {
+ return in, nil
+ }
+
+ spaces := width - slen
+ left := spaces/2 + spaces%2
+ right := spaces / 2
+
+ return AsValue(fmt.Sprintf("%s%s%s", strings.Repeat(" ", left),
+ in.String(), strings.Repeat(" ", right))), nil
+}
+
// filterDate formats a time.Time input using the parameter as a Go
// reference-time layout string; non-time input is an error.
func filterDate(in *Value, param *Value) (*Value, *Error) {
	t, isTime := in.Interface().(time.Time)
	if !isTime {
		return nil, &Error{
			Sender:    "filter:date",
			OrigError: errors.New("filter input argument must be of type 'time.Time'"),
		}
	}
	return AsValue(t.Format(param.String())), nil
}
+
// filterFloat converts the input to a float value.
func filterFloat(in *Value, param *Value) (*Value, *Error) {
	return AsValue(in.Float()), nil
}
+
// filterInteger converts the input to an integer value.
func filterInteger(in *Value, param *Value) (*Value, *Error) {
	return AsValue(in.Integer()), nil
}
+
+func filterLinebreaks(in *Value, param *Value) (*Value, *Error) {
+ if in.Len() == 0 {
+ return in, nil
+ }
+
+ var b bytes.Buffer
+
+ // Newline =
+ // Double newline =
...
+ lines := strings.Split(in.String(), "\n")
+ lenlines := len(lines)
+
+ opened := false
+
+ for idx, line := range lines {
+
+ if !opened {
+ b.WriteString("
")
+ opened = true
+ }
+
+ b.WriteString(line)
+
+ if idx < lenlines-1 && strings.TrimSpace(lines[idx]) != "" {
+ // We've not reached the end
+ if strings.TrimSpace(lines[idx+1]) == "" {
+ // Next line is empty
+ if opened {
+ b.WriteString("
")
+ opened = false
+ }
+ } else {
+ b.WriteString(" ")
+ }
+ }
+ }
+
+ if opened {
+ b.WriteString("
")
+ }
+
+ return AsValue(b.String()), nil
+}
+
// filterSplit splits the input string on the parameter separator and
// returns the resulting list of substrings.
func filterSplit(in *Value, param *Value) (*Value, *Error) {
	chunks := strings.Split(in.String(), param.String())

	return AsValue(chunks), nil
}
+
+func filterLinebreaksbr(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(strings.Replace(in.String(), "\n", " ", -1)), nil
+}
+
+func filterLinenumbers(in *Value, param *Value) (*Value, *Error) {
+ lines := strings.Split(in.String(), "\n")
+ output := make([]string, 0, len(lines))
+ for idx, line := range lines {
+ output = append(output, fmt.Sprintf("%d. %s", idx+1, line))
+ }
+ return AsValue(strings.Join(output, "\n")), nil
+}
+
// filterLjust left-justifies the input in a field of the given width by
// appending spaces; input already at least that wide is not padded.
func filterLjust(in *Value, param *Value) (*Value, *Error) {
	times := param.Integer() - in.Len()
	if times < 0 {
		times = 0
	}
	return AsValue(fmt.Sprintf("%s%s", in.String(), strings.Repeat(" ", times))), nil
}
+
// filterUrlencode percent-encodes the input for use in a URL query.
func filterUrlencode(in *Value, param *Value) (*Value, *Error) {
	return AsValue(url.QueryEscape(in.String())), nil
}
+
+// TODO: This regexp could do some work
+var filterUrlizeURLRegexp = regexp.MustCompile(`((((http|https)://)|www\.|((^|[ ])[0-9A-Za-z_\-]+(\.com|\.net|\.org|\.info|\.biz|\.de))))(?U:.*)([ ]+|$)`)
+var filterUrlizeEmailRegexp = regexp.MustCompile(`(\w+@\w+\.\w{2,4})`)
+
+func filterUrlizeHelper(input string, autoescape bool, trunc int) (string, error) {
+ var soutErr error
+ sout := filterUrlizeURLRegexp.ReplaceAllStringFunc(input, func(raw_url string) string {
+ var prefix string
+ var suffix string
+ if strings.HasPrefix(raw_url, " ") {
+ prefix = " "
+ }
+ if strings.HasSuffix(raw_url, " ") {
+ suffix = " "
+ }
+
+ raw_url = strings.TrimSpace(raw_url)
+
+ t, err := ApplyFilter("iriencode", AsValue(raw_url), nil)
+ if err != nil {
+ soutErr = err
+ return ""
+ }
+ url := t.String()
+
+ if !strings.HasPrefix(url, "http") {
+ url = fmt.Sprintf("http://%s", url)
+ }
+
+ title := raw_url
+
+ if trunc > 3 && len(title) > trunc {
+ title = fmt.Sprintf("%s...", title[:trunc-3])
+ }
+
+ if autoescape {
+ t, err := ApplyFilter("escape", AsValue(title), nil)
+ if err != nil {
+ soutErr = err
+ return ""
+ }
+ title = t.String()
+ }
+
+ return fmt.Sprintf(`%s%s%s`, prefix, url, title, suffix)
+ })
+ if soutErr != nil {
+ return "", soutErr
+ }
+
+ sout = filterUrlizeEmailRegexp.ReplaceAllStringFunc(sout, func(mail string) string {
+ title := mail
+
+ if trunc > 3 && len(title) > trunc {
+ title = fmt.Sprintf("%s...", title[:trunc-3])
+ }
+
+ return fmt.Sprintf(`%s`, mail, title)
+ })
+
+ return sout, nil
+}
+
+func filterUrlize(in *Value, param *Value) (*Value, *Error) {
+ autoescape := true
+ if param.IsBool() {
+ autoescape = param.Bool()
+ }
+
+ s, err := filterUrlizeHelper(in.String(), autoescape, -1)
+ if err != nil {
+
+ }
+
+ return AsValue(s), nil
+}
+
+func filterUrlizetrunc(in *Value, param *Value) (*Value, *Error) {
+ s, err := filterUrlizeHelper(in.String(), true, param.Integer())
+ if err != nil {
+ return nil, &Error{
+ Sender: "filter:urlizetrunc",
+ OrigError: errors.New("you cannot pass more than 2 arguments to filter 'pluralize'"),
+ }
+ }
+ return AsValue(s), nil
+}
+
// filterStringformat formats the input with fmt.Sprintf using the
// parameter as the format string.
func filterStringformat(in *Value, param *Value) (*Value, *Error) {
	return AsValue(fmt.Sprintf(param.String(), in.Interface())), nil
}
+
+var reStriptags = regexp.MustCompile("<[^>]*?>")
+
// filterStriptags removes all HTML tags from the input and trims the
// surrounding whitespace of the result.
func filterStriptags(in *Value, param *Value) (*Value, *Error) {
	s := in.String()

	// Strip all tags
	s = reStriptags.ReplaceAllString(s, "")

	return AsValue(strings.TrimSpace(s)), nil
}
+
+// https://en.wikipedia.org/wiki/Phoneword
+var filterPhone2numericMap = map[string]string{
+ "a": "2", "b": "2", "c": "2", "d": "3", "e": "3", "f": "3", "g": "4", "h": "4", "i": "4", "j": "5", "k": "5",
+ "l": "5", "m": "6", "n": "6", "o": "6", "p": "7", "q": "7", "r": "7", "s": "7", "t": "8", "u": "8", "v": "8",
+ "w": "9", "x": "9", "y": "9", "z": "9",
+}
+
// filterPhone2numeric converts phoneword letters (in either case) to
// their keypad digits, e.g. "1-800-COLLECT" -> "1-800-2655328".
func filterPhone2numeric(in *Value, param *Value) (*Value, *Error) {
	sin := in.String()
	for k, v := range filterPhone2numericMap {
		sin = strings.Replace(sin, k, v, -1)
		sin = strings.Replace(sin, strings.ToUpper(k), v, -1)
	}
	return AsValue(sin), nil
}
+
// filterPluralize implements Django's "pluralize" for numeric input:
// it returns a suffix depending on whether the value equals 1. With no
// argument the plural suffix is "s"; with one argument it is the plural
// suffix; with two comma-separated arguments they are the singular and
// plural suffixes. Non-numeric input is an error.
func filterPluralize(in *Value, param *Value) (*Value, *Error) {
	if in.IsNumber() {
		// Works only on numbers
		if param.Len() > 0 {
			endings := strings.Split(param.String(), ",")
			if len(endings) > 2 {
				return nil, &Error{
					Sender:    "filter:pluralize",
					OrigError: errors.New("you cannot pass more than 2 arguments to filter 'pluralize'"),
				}
			}
			if len(endings) == 1 {
				// 1 argument
				if in.Integer() != 1 {
					return AsValue(endings[0]), nil
				}
			} else {
				if in.Integer() != 1 {
					// 2 arguments
					return AsValue(endings[1]), nil
				}
				return AsValue(endings[0]), nil
			}
		} else {
			if in.Integer() != 1 {
				// return default 's'
				return AsValue("s"), nil
			}
		}

		// Singular: no suffix.
		return AsValue(""), nil
	}
	return nil, &Error{
		Sender:    "filter:pluralize",
		OrigError: errors.New("filter 'pluralize' does only work on numbers"),
	}
}
+
// filterRandom returns a uniformly random element of a non-empty
// sliceable value; other input is returned unchanged.
func filterRandom(in *Value, param *Value) (*Value, *Error) {
	if !in.CanSlice() || in.Len() <= 0 {
		return in, nil
	}
	i := rand.Intn(in.Len())
	return in.Index(i), nil
}
+
+func filterRemovetags(in *Value, param *Value) (*Value, *Error) {
+ s := in.String()
+ tags := strings.Split(param.String(), ",")
+
+ // Strip only specific tags
+ for _, tag := range tags {
+ re := regexp.MustCompile(fmt.Sprintf("?%s/?>", tag))
+ s = re.ReplaceAllString(s, "")
+ }
+
+ return AsValue(strings.TrimSpace(s)), nil
+}
+
// filterRjust right-justifies the input in a field of the given width by
// building a dynamic "%Nd"-style format ("%Ns") and applying it.
func filterRjust(in *Value, param *Value) (*Value, *Error) {
	return AsValue(fmt.Sprintf(fmt.Sprintf("%%%ds", param.Integer()), in.String())), nil
}
+
// filterSlice applies a Python-like [from:to] slice to a sliceable value.
// The parameter must be "from:to"; bounds are clamped to the value's
// length and non-sliceable input is returned unchanged.
func filterSlice(in *Value, param *Value) (*Value, *Error) {
	comp := strings.Split(param.String(), ":")
	if len(comp) != 2 {
		return nil, &Error{
			Sender:    "filter:slice",
			OrigError: errors.New("Slice string must have the format 'from:to' [from/to can be omitted, but the ':' is required]"),
		}
	}

	if !in.CanSlice() {
		return in, nil
	}

	// Empty components parse to 0 via Integer().
	from := AsValue(comp[0]).Integer()
	to := in.Len()

	if from > to {
		from = to
	}

	vto := AsValue(comp[1]).Integer()
	if vto >= from && vto <= in.Len() {
		to = vto
	}

	return in.Slice(from, to), nil
}
+
// filterTitle title-cases a string input (each word capitalized);
// non-string input yields "".
func filterTitle(in *Value, param *Value) (*Value, *Error) {
	if !in.IsString() {
		return AsValue(""), nil
	}
	return AsValue(strings.Title(strings.ToLower(in.String()))), nil
}
+
// filterWordcount returns the number of whitespace-separated words in
// the input.
func filterWordcount(in *Value, param *Value) (*Value, *Error) {
	return AsValue(len(strings.Fields(in.String()))), nil
}
+
+func filterWordwrap(in *Value, param *Value) (*Value, *Error) {
+ words := strings.Fields(in.String())
+ wordsLen := len(words)
+ wrapAt := param.Integer()
+ if wrapAt <= 0 {
+ return in, nil
+ }
+
+ linecount := wordsLen/wrapAt + wordsLen%wrapAt
+ lines := make([]string, 0, linecount)
+ for i := 0; i < linecount; i++ {
+ lines = append(lines, strings.Join(words[wrapAt*i:min(wrapAt*(i+1), wordsLen)], " "))
+ }
+ return AsValue(strings.Join(lines, "\n")), nil
+}
+
// filterYesno maps the input's truthiness to one of three strings
// (Django "yesno"): true -> first, false -> second, nil -> third.
// Defaults are "yes"/"no"/"maybe"; a comma-separated parameter of 2 or
// 3 values overrides them (with only 2 values, the nil choice keeps its
// default "maybe").
func filterYesno(in *Value, param *Value) (*Value, *Error) {
	choices := map[int]string{
		0: "yes",
		1: "no",
		2: "maybe",
	}
	paramString := param.String()
	customChoices := strings.Split(paramString, ",")
	if len(paramString) > 0 {
		if len(customChoices) > 3 {
			return nil, &Error{
				Sender:    "filter:yesno",
				OrigError: errors.Errorf("You cannot pass more than 3 options to the 'yesno'-filter (got: '%s').", paramString),
			}
		}
		if len(customChoices) < 2 {
			return nil, &Error{
				Sender:    "filter:yesno",
				OrigError: errors.Errorf("You must pass either no or at least 2 arguments to the 'yesno'-filter (got: '%s').", paramString),
			}
		}

		// Map to the options now
		choices[0] = customChoices[0]
		choices[1] = customChoices[1]
		if len(customChoices) == 3 {
			choices[2] = customChoices[2]
		}
	}

	// maybe
	if in.IsNil() {
		return AsValue(choices[2]), nil
	}

	// yes
	if in.IsTrue() {
		return AsValue(choices[0]), nil
	}

	// no
	return AsValue(choices[1]), nil
}
diff --git a/vendor/github.com/flosch/pongo2/go.mod b/vendor/github.com/flosch/pongo2/go.mod
new file mode 100644
index 0000000..06b6c25
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/go.mod
@@ -0,0 +1,13 @@
+module github.com/flosch/pongo2
+
+require (
+ github.com/go-check/check v0.0.0-20180628173108-788fd7840127
+ github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5
+ github.com/juju/loggo v0.0.0-20180524022052-584905176618 // indirect
+ github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073 // indirect
+ github.com/kr/pretty v0.1.0 // indirect
+ github.com/mattn/goveralls v0.0.2 // indirect
+ golang.org/x/tools v0.0.0-20181221001348-537d06c36207 // indirect
+ gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce // indirect
+ gopkg.in/yaml.v2 v2.2.2 // indirect
+)
diff --git a/vendor/github.com/flosch/pongo2/helpers.go b/vendor/github.com/flosch/pongo2/helpers.go
new file mode 100644
index 0000000..880dbc0
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/helpers.go
@@ -0,0 +1,15 @@
+package pongo2
+
// max returns the larger of the two given integers.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
+
// min returns the smaller of the two given integers.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
diff --git a/vendor/github.com/flosch/pongo2/lexer.go b/vendor/github.com/flosch/pongo2/lexer.go
new file mode 100644
index 0000000..67b0b95
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/lexer.go
@@ -0,0 +1,432 @@
+package pongo2
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "github.com/juju/errors"
+)
+
+const (
+ TokenError = iota
+ EOF
+
+ TokenHTML
+
+ TokenKeyword
+ TokenIdentifier
+ TokenString
+ TokenNumber
+ TokenSymbol
+)
+
+var (
+ tokenSpaceChars = " \n\r\t"
+ tokenIdentifierChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_"
+ tokenIdentifierCharsWithDigits = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789"
+ tokenDigits = "0123456789"
+
+ // Available symbols in pongo2 (within filters/tag)
+ TokenSymbols = []string{
+ // 3-Char symbols
+ "{{-", "-}}", "{%-", "-%}",
+
+ // 2-Char symbols
+ "==", ">=", "<=", "&&", "||", "{{", "}}", "{%", "%}", "!=", "<>",
+
+ // 1-Char symbol
+ "(", ")", "+", "-", "*", "<", ">", "/", "^", ",", ".", "!", "|", ":", "=", "%",
+ }
+
+ // Available keywords in pongo2
+ TokenKeywords = []string{"in", "and", "or", "not", "true", "false", "as", "export"}
+)
+
+type TokenType int
+type Token struct {
+ Filename string
+ Typ TokenType
+ Val string
+ Line int
+ Col int
+ TrimWhitespaces bool
+}
+
+type lexerStateFn func() lexerStateFn
+type lexer struct {
+ name string
+ input string
+ start int // start pos of the item
+ pos int // current pos
+ width int // width of last rune
+ tokens []*Token
+ errored bool
+ startline int
+ startcol int
+ line int
+ col int
+
+ inVerbatim bool
+ verbatimName string
+}
+
+func (t *Token) String() string {
+ val := t.Val
+ if len(val) > 1000 {
+ val = fmt.Sprintf("%s...%s", val[:10], val[len(val)-5:len(val)])
+ }
+
+ typ := ""
+ switch t.Typ {
+ case TokenHTML:
+ typ = "HTML"
+ case TokenError:
+ typ = "Error"
+ case TokenIdentifier:
+ typ = "Identifier"
+ case TokenKeyword:
+ typ = "Keyword"
+ case TokenNumber:
+ typ = "Number"
+ case TokenString:
+ typ = "String"
+ case TokenSymbol:
+ typ = "Symbol"
+ default:
+ typ = "Unknown"
+ }
+
+ return fmt.Sprintf("",
+ typ, t.Typ, val, t.Line, t.Col, t.TrimWhitespaces)
+}
+
// lex tokenizes a full template source. On a lexing error the last
// appended token is the error token; its position and message are
// converted into an *Error.
func lex(name string, input string) ([]*Token, *Error) {
	l := &lexer{
		name:      name,
		input:     input,
		tokens:    make([]*Token, 0, 100),
		line:      1,
		col:       1,
		startline: 1,
		startcol:  1,
	}
	l.run()
	if l.errored {
		// errorf always appends the error token last.
		errtoken := l.tokens[len(l.tokens)-1]
		return nil, &Error{
			Filename:  name,
			Line:      errtoken.Line,
			Column:    errtoken.Col,
			Sender:    "lexer",
			OrigError: errors.New(errtoken.Val),
		}
	}
	return l.tokens, nil
}
+
+func (l *lexer) value() string {
+ return l.input[l.start:l.pos]
+}
+
+func (l *lexer) length() int {
+ return l.pos - l.start
+}
+
// emit appends a token of type t covering input[start:pos] to the token
// stream and advances the start markers past the consumed text.
func (l *lexer) emit(t TokenType) {
	tok := &Token{
		Filename: l.name,
		Typ:      t,
		Val:      l.value(),
		Line:     l.startline,
		Col:      l.startcol,
	}

	if t == TokenString {
		// Unescape \" and \\ sequences inside string literals.
		tok.Val = strings.Replace(tok.Val, `\"`, `"`, -1)
		tok.Val = strings.Replace(tok.Val, `\\`, `\`, -1)
	}

	// A 3-char symbol like "{{-" requests whitespace trimming; record the
	// flag and normalize the symbol to its 2-char form.
	if t == TokenSymbol && len(tok.Val) == 3 && (strings.HasSuffix(tok.Val, "-") || strings.HasPrefix(tok.Val, "-")) {
		tok.TrimWhitespaces = true
		tok.Val = strings.Replace(tok.Val, "-", "", -1)
	}

	l.tokens = append(l.tokens, tok)
	l.start = l.pos
	l.startline = l.line
	l.startcol = l.col
}
+
+func (l *lexer) next() rune {
+ if l.pos >= len(l.input) {
+ l.width = 0
+ return EOF
+ }
+ r, w := utf8.DecodeRuneInString(l.input[l.pos:])
+ l.width = w
+ l.pos += l.width
+ l.col += l.width
+ return r
+}
+
+func (l *lexer) backup() {
+ l.pos -= l.width
+ l.col -= l.width
+}
+
+func (l *lexer) peek() rune {
+ r := l.next()
+ l.backup()
+ return r
+}
+
+func (l *lexer) ignore() {
+ l.start = l.pos
+ l.startline = l.line
+ l.startcol = l.col
+}
+
+func (l *lexer) accept(what string) bool {
+ if strings.IndexRune(what, l.next()) >= 0 {
+ return true
+ }
+ l.backup()
+ return false
+}
+
+func (l *lexer) acceptRun(what string) {
+ for strings.IndexRune(what, l.next()) >= 0 {
+ }
+ l.backup()
+}
+
+func (l *lexer) errorf(format string, args ...interface{}) lexerStateFn {
+ t := &Token{
+ Filename: l.name,
+ Typ: TokenError,
+ Val: fmt.Sprintf(format, args...),
+ Line: l.startline,
+ Col: l.startcol,
+ }
+ l.tokens = append(l.tokens, t)
+ l.errored = true
+ l.startline = l.line
+ l.startcol = l.col
+ return nil
+}
+
+func (l *lexer) eof() bool {
+ return l.start >= len(l.input)-1
+}
+
// run is the lexer's main loop: it walks the raw template, handling
// {% verbatim %} sections, skipping {# ... #} comments, delegating
// {{ ... }} / {% ... %} regions to tokenize(), and emitting everything
// else as HTML tokens.
func (l *lexer) run() {
	for {
		// TODO: Support verbatim tag names
		// https://docs.djangoproject.com/en/dev/ref/templates/builtins/#verbatim
		if l.inVerbatim {
			name := l.verbatimName
			if name != "" {
				name += " "
			}
			if strings.HasPrefix(l.input[l.pos:], fmt.Sprintf("{%% endverbatim %s%%}", name)) { // end verbatim
				if l.pos > l.start {
					l.emit(TokenHTML)
				}
				w := len("{% endverbatim %}")
				l.pos += w
				l.col += w
				l.ignore()
				l.inVerbatim = false
			}
		} else if strings.HasPrefix(l.input[l.pos:], "{% verbatim %}") { // tag
			if l.pos > l.start {
				l.emit(TokenHTML)
			}
			l.inVerbatim = true
			w := len("{% verbatim %}")
			l.pos += w
			l.col += w
			l.ignore()
		}

		if !l.inVerbatim {
			// Ignore single-line comments {# ... #}
			if strings.HasPrefix(l.input[l.pos:], "{#") {
				if l.pos > l.start {
					l.emit(TokenHTML)
				}

				l.pos += 2 // pass '{#'
				l.col += 2

				for {
					switch l.peek() {
					case EOF:
						l.errorf("Single-line comment not closed.")
						return
					case '\n':
						l.errorf("Newline not permitted in a single-line comment.")
						return
					}

					if strings.HasPrefix(l.input[l.pos:], "#}") {
						l.pos += 2 // pass '#}'
						l.col += 2
						break
					}

					l.next()
				}
				l.ignore() // ignore whole comment

				// Comment skipped
				continue // next token
			}

			if strings.HasPrefix(l.input[l.pos:], "{{") || // variable
				strings.HasPrefix(l.input[l.pos:], "{%") { // tag
				if l.pos > l.start {
					l.emit(TokenHTML)
				}
				l.tokenize()
				if l.errored {
					return
				}
				continue
			}
		}

		// Track line/column position across plain HTML text.
		switch l.peek() {
		case '\n':
			l.line++
			l.col = 0
		}
		if l.next() == EOF {
			break
		}
	}

	// Flush any trailing HTML.
	if l.pos > l.start {
		l.emit(TokenHTML)
	}

	if l.inVerbatim {
		l.errorf("verbatim-tag not closed, got EOF.")
	}
}
+
+func (l *lexer) tokenize() {
+ for state := l.stateCode; state != nil; {
+ state = state()
+ }
+}
+
// stateCode is the lexer state inside a tag or variable ({% ... %} /
// {{ ... }}): it skips whitespace, dispatches to the identifier, number
// and string states, and emits symbols until a closing symbol ends the
// region.
func (l *lexer) stateCode() lexerStateFn {
outer_loop:
	for {
		switch {
		case l.accept(tokenSpaceChars):
			if l.value() == "\n" {
				return l.errorf("Newline not allowed within tag/variable.")
			}
			l.ignore()
			continue
		case l.accept(tokenIdentifierChars):
			return l.stateIdentifier
		case l.accept(tokenDigits):
			return l.stateNumber
		case l.accept(`"'`):
			return l.stateString
		}

		// Check for symbol
		for _, sym := range TokenSymbols {
			if strings.HasPrefix(l.input[l.start:], sym) {
				l.pos += len(sym)
				l.col += l.length()
				l.emit(TokenSymbol)

				if sym == "%}" || sym == "-%}" || sym == "}}" || sym == "-}}" {
					// Tag/variable end, return after emit
					return nil
				}

				continue outer_loop
			}
		}

		break
	}

	// Normal shut down
	return nil
}
+
// stateIdentifier consumes an identifier (first char already accepted);
// digits are allowed after the first character. Known keywords are
// emitted as TokenKeyword, everything else as TokenIdentifier.
func (l *lexer) stateIdentifier() lexerStateFn {
	l.acceptRun(tokenIdentifierChars)
	l.acceptRun(tokenIdentifierCharsWithDigits)
	for _, kw := range TokenKeywords {
		if kw == l.value() {
			l.emit(TokenKeyword)
			return l.stateCode
		}
	}
	l.emit(TokenIdentifier)
	return l.stateCode
}
+
// stateNumber consumes a run of digits and emits a TokenNumber. If a
// letter or underscore follows immediately, the whole run is re-lexed as
// an identifier instead.
func (l *lexer) stateNumber() lexerStateFn {
	l.acceptRun(tokenDigits)
	if l.accept(tokenIdentifierCharsWithDigits) {
		// This seems to be an identifier starting with a number.
		// See https://github.com/flosch/pongo2/issues/151
		return l.stateIdentifier()
	}
	/*
		Maybe context-sensitive number lexing?
		* comments.0.Text // first comment
		* usercomments.1.0 // second user, first comment
		* if (score >= 8.5) // 8.5 as a number

		if l.peek() == '.' {
			l.accept(".")
			if !l.accept(tokenDigits) {
				return l.errorf("Malformed number.")
			}
			l.acceptRun(tokenDigits)
		}
	*/
	l.emit(TokenNumber)
	return l.stateCode
}
+
// stateString consumes a quoted string (opening quote already accepted),
// honoring \" and \\ escape sequences; newlines and EOF inside the
// string are errors. The emitted token excludes the quotes.
func (l *lexer) stateString() lexerStateFn {
	quotationMark := l.value()
	l.ignore()
	l.startcol-- // we're starting the position at the first "
	for !l.accept(quotationMark) {
		switch l.next() {
		case '\\':
			// escape sequence
			switch l.peek() {
			case '"', '\\':
				l.next()
			default:
				return l.errorf("Unknown escape sequence: \\%c", l.peek())
			}
		case EOF:
			return l.errorf("Unexpected EOF, string not closed.")
		case '\n':
			return l.errorf("Newline in string is not allowed.")
		}
	}
	// Back up over the closing quote so it is excluded from the token.
	l.backup()
	l.emit(TokenString)

	// Skip past the closing quote.
	l.next()
	l.ignore()

	return l.stateCode
}
diff --git a/vendor/github.com/flosch/pongo2/nodes.go b/vendor/github.com/flosch/pongo2/nodes.go
new file mode 100644
index 0000000..5b039cd
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/nodes.go
@@ -0,0 +1,16 @@
+package pongo2
+
+// The root document
+type nodeDocument struct {
+ Nodes []INode
+}
+
+func (doc *nodeDocument) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ for _, n := range doc.Nodes {
+ err := n.Execute(ctx, writer)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/flosch/pongo2/nodes_html.go b/vendor/github.com/flosch/pongo2/nodes_html.go
new file mode 100644
index 0000000..c735def
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/nodes_html.go
@@ -0,0 +1,23 @@
+package pongo2
+
+import (
+ "strings"
+)
+
+type nodeHTML struct {
+ token *Token
+ trimLeft bool
+ trimRight bool
+}
+
+func (n *nodeHTML) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ res := n.token.Val
+ if n.trimLeft {
+ res = strings.TrimLeft(res, tokenSpaceChars)
+ }
+ if n.trimRight {
+ res = strings.TrimRight(res, tokenSpaceChars)
+ }
+ writer.WriteString(res)
+ return nil
+}
diff --git a/vendor/github.com/flosch/pongo2/nodes_wrapper.go b/vendor/github.com/flosch/pongo2/nodes_wrapper.go
new file mode 100644
index 0000000..d1bcb8d
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/nodes_wrapper.go
@@ -0,0 +1,16 @@
+package pongo2
+
+type NodeWrapper struct {
+ Endtag string
+ nodes []INode
+}
+
+func (wrapper *NodeWrapper) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ for _, n := range wrapper.nodes {
+ err := n.Execute(ctx, writer)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/flosch/pongo2/options.go b/vendor/github.com/flosch/pongo2/options.go
new file mode 100644
index 0000000..9c39e46
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/options.go
@@ -0,0 +1,26 @@
+package pongo2
+
+// Options allow you to change the behavior of template-engine.
+// You can change the options before calling the Execute method.
+type Options struct {
+ // If this is set to true the first newline after a block is removed (block, not variable tag!). Defaults to false.
+ TrimBlocks bool
+
+ // If this is set to true leading spaces and tabs are stripped from the start of a line to a block. Defaults to false
+ LStripBlocks bool
+}
+
+func newOptions() *Options {
+ return &Options{
+ TrimBlocks: false,
+ LStripBlocks: false,
+ }
+}
+
+// Update updates this options from another options.
+func (opt *Options) Update(other *Options) *Options {
+ opt.TrimBlocks = other.TrimBlocks
+ opt.LStripBlocks = other.LStripBlocks
+
+ return opt
+}
diff --git a/vendor/github.com/flosch/pongo2/parser.go b/vendor/github.com/flosch/pongo2/parser.go
new file mode 100644
index 0000000..2279e3c
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/parser.go
@@ -0,0 +1,309 @@
+package pongo2
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/juju/errors"
+)
+
+type INode interface {
+ Execute(*ExecutionContext, TemplateWriter) *Error
+}
+
+type IEvaluator interface {
+ INode
+ GetPositionToken() *Token
+ Evaluate(*ExecutionContext) (*Value, *Error)
+ FilterApplied(name string) bool
+}
+
+// The parser provides you a comprehensive and easy tool to
+// work with the template document and arguments provided by
+// the user for your custom tag.
+//
+// The parser works on a token list which will be provided by pongo2.
+// A token is a unit you can work with. Tokens are either of type identifier,
+// string, number, keyword, HTML or symbol.
+//
+// (See Token's documentation for more about tokens)
+type Parser struct {
+ name string
+ idx int
+ tokens []*Token
+ lastToken *Token
+
+ // if the parser parses a template document, here will be
+ // a reference to it (needed to access the template through Tags)
+ template *Template
+}
+
+// Creates a new parser to parse tokens.
+// Used inside pongo2 to parse documents and to provide an easy-to-use
+// parser for tag authors
// Creates a new parser to parse tokens.
// Used inside pongo2 to parse documents and to provide an easy-to-use
// parser for tag authors. lastToken is cached for error reporting at EOF.
func newParser(name string, tokens []*Token, template *Template) *Parser {
	p := &Parser{
		name:     name,
		tokens:   tokens,
		template: template,
	}
	if len(tokens) > 0 {
		p.lastToken = tokens[len(tokens)-1]
	}
	return p
}
+
+// Consume one token. It will be gone forever.
+func (p *Parser) Consume() {
+ p.ConsumeN(1)
+}
+
+// Consume N tokens. They will be gone forever.
+func (p *Parser) ConsumeN(count int) {
+ p.idx += count
+}
+
+// Returns the current token.
+func (p *Parser) Current() *Token {
+ return p.Get(p.idx)
+}
+
+// Returns the CURRENT token if the given type matches.
+// Consumes this token on success.
+func (p *Parser) MatchType(typ TokenType) *Token {
+ if t := p.PeekType(typ); t != nil {
+ p.Consume()
+ return t
+ }
+ return nil
+}
+
+// Returns the CURRENT token if the given type AND value matches.
+// Consumes this token on success.
+func (p *Parser) Match(typ TokenType, val string) *Token {
+ if t := p.Peek(typ, val); t != nil {
+ p.Consume()
+ return t
+ }
+ return nil
+}
+
+// Returns the CURRENT token if the given type AND *one* of
+// the given values matches.
+// Consumes this token on success.
+func (p *Parser) MatchOne(typ TokenType, vals ...string) *Token {
+ for _, val := range vals {
+ if t := p.Peek(typ, val); t != nil {
+ p.Consume()
+ return t
+ }
+ }
+ return nil
+}
+
+// Returns the CURRENT token if the given type matches.
+// It DOES NOT consume the token.
+func (p *Parser) PeekType(typ TokenType) *Token {
+ return p.PeekTypeN(0, typ)
+}
+
+// Returns the CURRENT token if the given type AND value matches.
+// It DOES NOT consume the token.
+func (p *Parser) Peek(typ TokenType, val string) *Token {
+ return p.PeekN(0, typ, val)
+}
+
+// Returns the CURRENT token if the given type AND *one* of
+// the given values matches.
+// It DOES NOT consume the token.
+func (p *Parser) PeekOne(typ TokenType, vals ...string) *Token {
+ for _, v := range vals {
+ t := p.PeekN(0, typ, v)
+ if t != nil {
+ return t
+ }
+ }
+ return nil
+}
+
+// Returns the tokens[current position + shift] token if the
+// given type AND value matches for that token.
+// DOES NOT consume the token.
+func (p *Parser) PeekN(shift int, typ TokenType, val string) *Token {
+ t := p.Get(p.idx + shift)
+ if t != nil {
+ if t.Typ == typ && t.Val == val {
+ return t
+ }
+ }
+ return nil
+}
+
+// Returns the tokens[current position + shift] token if the given type matches.
+// DOES NOT consume the token for that token.
+func (p *Parser) PeekTypeN(shift int, typ TokenType) *Token {
+ t := p.Get(p.idx + shift)
+ if t != nil {
+ if t.Typ == typ {
+ return t
+ }
+ }
+ return nil
+}
+
+// Returns the UNCONSUMED token count.
+func (p *Parser) Remaining() int {
+ return len(p.tokens) - p.idx
+}
+
+// Returns the total token count.
+func (p *Parser) Count() int {
+ return len(p.tokens)
+}
+
+// Returns tokens[i] or NIL (if i >= len(tokens))
+func (p *Parser) Get(i int) *Token {
+ if i < len(p.tokens) && i >= 0 {
+ return p.tokens[i]
+ }
+ return nil
+}
+
+// Returns tokens[current-position + shift] or NIL
+// (if (current-position + i) >= len(tokens))
+func (p *Parser) GetR(shift int) *Token {
+ i := p.idx + shift
+ return p.Get(i)
+}
+
+// Error produces a nice error message and returns an error-object.
+// The 'token'-argument is optional. If provided, it will take
+// the token's position information. If not provided, it will
+// automatically use the CURRENT token's position information.
// Error produces a nice error message and returns an error-object.
// The 'token'-argument is optional. If provided, it will take
// the token's position information. If not provided, it will
// automatically use the CURRENT token's position information
// (falling back to the last token at EOF).
func (p *Parser) Error(msg string, token *Token) *Error {
	if token == nil {
		// Set current token
		token = p.Current()
		if token == nil {
			// Set to last token
			if len(p.tokens) > 0 {
				token = p.tokens[len(p.tokens)-1]
			}
		}
	}
	// token may still be nil for an empty token stream; report 0/0 then.
	var line, col int
	if token != nil {
		line = token.Line
		col = token.Col
	}
	return &Error{
		Template:  p.template,
		Filename:  p.name,
		Sender:    "parser",
		Line:      line,
		Column:    col,
		Token:     token,
		OrigError: errors.New(msg),
	}
}
+
+// Wraps all nodes between starting tag and "{% endtag %}" and provides
+// one simple interface to execute the wrapped nodes.
+// It returns a parser to process provided arguments to the tag.
// Wraps all nodes between starting tag and "{% endtag %}" and provides
// one simple interface to execute the wrapped nodes.
// It returns a parser to process provided arguments to the tag: the
// tokens between the matched end-tag name and its closing "%}".
func (p *Parser) WrapUntilTag(names ...string) (*NodeWrapper, *Parser, *Error) {
	wrapper := &NodeWrapper{}

	var tagArgs []*Token

	for p.Remaining() > 0 {
		// New tag, check whether we have to stop wrapping here
		if p.Peek(TokenSymbol, "{%") != nil {
			tagIdent := p.PeekTypeN(1, TokenIdentifier)

			if tagIdent != nil {
				// We've found a (!) end-tag

				found := false
				for _, n := range names {
					if tagIdent.Val == n {
						found = true
						break
					}
				}

				// We only process the tag if we've found an end tag
				if found {
					// Okay, endtag found.
					p.ConsumeN(2) // '{%' tagname

					// Collect the end-tag's argument tokens up to "%}".
					for {
						if p.Match(TokenSymbol, "%}") != nil {
							// Okay, end the wrapping here
							wrapper.Endtag = tagIdent.Val
							return wrapper, newParser(p.template.name, tagArgs, p.template), nil
						}
						t := p.Current()
						p.Consume()
						if t == nil {
							return nil, nil, p.Error("Unexpected EOF.", p.lastToken)
						}
						tagArgs = append(tagArgs, t)
					}
				}
			}

		}

		// Otherwise process next element to be wrapped
		node, err := p.parseDocElement()
		if err != nil {
			return nil, nil, err
		}
		wrapper.nodes = append(wrapper.nodes, node)
	}

	return nil, nil, p.Error(fmt.Sprintf("Unexpected EOF, expected tag %s.", strings.Join(names, " or ")),
		p.lastToken)
}
+
+// Skips all nodes between starting tag and "{% endtag %}"
+func (p *Parser) SkipUntilTag(names ...string) *Error {
+ for p.Remaining() > 0 {
+ // New tag, check whether we have to stop wrapping here
+ if p.Peek(TokenSymbol, "{%") != nil {
+ tagIdent := p.PeekTypeN(1, TokenIdentifier)
+
+ if tagIdent != nil {
+ // We've found a (!) end-tag
+
+ found := false
+ for _, n := range names {
+ if tagIdent.Val == n {
+ found = true
+ break
+ }
+ }
+
+ // We only process the tag if we've found an end tag
+ if found {
+ // Okay, endtag found.
+ p.ConsumeN(2) // '{%' tagname
+
+ for {
+ if p.Match(TokenSymbol, "%}") != nil {
+ // Done skipping, exit.
+ return nil
+ }
+ }
+ }
+ }
+ }
+ t := p.Current()
+ p.Consume()
+ if t == nil {
+ return p.Error("Unexpected EOF.", p.lastToken)
+ }
+ }
+
+ return p.Error(fmt.Sprintf("Unexpected EOF, expected tag %s.", strings.Join(names, " or ")), p.lastToken)
+}
diff --git a/vendor/github.com/flosch/pongo2/parser_document.go b/vendor/github.com/flosch/pongo2/parser_document.go
new file mode 100644
index 0000000..e3ac2c8
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/parser_document.go
@@ -0,0 +1,59 @@
+package pongo2
+
+// Doc = { ( Filter | Tag | HTML ) }
+
+// parseDocElement parses exactly one document element at the current
+// position: a raw HTML chunk, a variable output "{{ ... }}" or a tag
+// "{% ... %}". Any other token is a parse error.
+func (p *Parser) parseDocElement() (INode, *Error) {
+	t := p.Current()
+
+	switch t.Typ {
+	case TokenHTML:
+		n := &nodeHTML{token: t}
+		// Whitespace trimming is driven by the neighbouring symbol tokens
+		// (e.g. "{%-" / "-%}") on either side of this HTML chunk.
+		left := p.PeekTypeN(-1, TokenSymbol)
+		right := p.PeekTypeN(1, TokenSymbol)
+		n.trimLeft = left != nil && left.TrimWhitespaces
+		n.trimRight = right != nil && right.TrimWhitespaces
+		p.Consume() // consume HTML element
+		return n, nil
+	case TokenSymbol:
+		switch t.Val {
+		case "{{":
+			// parse variable
+			variable, err := p.parseVariableElement()
+			if err != nil {
+				return nil, err
+			}
+			return variable, nil
+		case "{%":
+			// parse tag
+			tag, err := p.parseTagElement()
+			if err != nil {
+				return nil, err
+			}
+			return tag, nil
+		}
+	}
+	return nil, p.Error("Unexpected token (only HTML/tags/filters in templates allowed)", t)
+}
+
+// parse builds the template's node tree from its token stream and stores
+// the resulting root document on the template.
+func (tpl *Template) parse() *Error {
+	tpl.parser = newParser(tpl.name, tpl.tokens, tpl)
+	doc, err := tpl.parser.parseDocument()
+	if err != nil {
+		return err
+	}
+	tpl.root = doc
+	return nil
+}
+
+// parseDocument consumes the whole token stream, collecting one document
+// element per iteration until no tokens remain.
+func (p *Parser) parseDocument() (*nodeDocument, *Error) {
+	doc := &nodeDocument{}
+
+	for p.Remaining() > 0 {
+		node, err := p.parseDocElement()
+		if err != nil {
+			return nil, err
+		}
+		doc.Nodes = append(doc.Nodes, node)
+	}
+
+	return doc, nil
+}
diff --git a/vendor/github.com/flosch/pongo2/parser_expression.go b/vendor/github.com/flosch/pongo2/parser_expression.go
new file mode 100644
index 0000000..1663ec4
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/parser_expression.go
@@ -0,0 +1,503 @@
+package pongo2
+
+import (
+	"fmt"
+	"math"
+)
+
+// Expression precedence, lowest to highest, as implemented by the parse
+// functions at the bottom of this file:
+//   Expression > relationalExpression > simpleExpression > term > power > factor
+
+// Expression is the top-level node: two relational sub-expressions joined
+// by a logical operator ("and"/"&&" or "or"/"||"), or just expr1 alone.
+type Expression struct {
+	// TODO: Add location token?
+	expr1   IEvaluator
+	expr2   IEvaluator
+	opToken *Token
+}
+
+// relationalExpression joins two simple expressions with a comparison
+// operator (==, !=, <, <=, >, >=, <>, in).
+type relationalExpression struct {
+	// TODO: Add location token?
+	expr1   IEvaluator
+	expr2   IEvaluator
+	opToken *Token
+}
+
+// simpleExpression is an optionally negated ("!"/"not") and/or sign-negated
+// ("-") term, optionally combined with a second term via "+" or "-".
+type simpleExpression struct {
+	negate       bool
+	negativeSign bool
+	term1        IEvaluator
+	term2        IEvaluator
+	opToken      *Token
+}
+
+// term joins two powers with "*", "/" or "%".
+type term struct {
+	// TODO: Add location token?
+	factor1 IEvaluator
+	factor2 IEvaluator
+	opToken *Token
+}
+
+// power is exponentiation: power1 ^ power2 (right-associative).
+type power struct {
+	// TODO: Add location token?
+	power1 IEvaluator
+	power2 IEvaluator
+}
+
+// FilterApplied reports whether the named filter has been applied to every
+// sub-expression of this node (both operands when two are present).
+func (expr *Expression) FilterApplied(name string) bool {
+	return expr.expr1.FilterApplied(name) && (expr.expr2 == nil ||
+		(expr.expr2 != nil && expr.expr2.FilterApplied(name)))
+}
+
+func (expr *relationalExpression) FilterApplied(name string) bool {
+	return expr.expr1.FilterApplied(name) && (expr.expr2 == nil ||
+		(expr.expr2 != nil && expr.expr2.FilterApplied(name)))
+}
+
+func (expr *simpleExpression) FilterApplied(name string) bool {
+	return expr.term1.FilterApplied(name) && (expr.term2 == nil ||
+		(expr.term2 != nil && expr.term2.FilterApplied(name)))
+}
+
+func (expr *term) FilterApplied(name string) bool {
+	return expr.factor1.FilterApplied(name) && (expr.factor2 == nil ||
+		(expr.factor2 != nil && expr.factor2.FilterApplied(name)))
+}
+
+func (expr *power) FilterApplied(name string) bool {
+	return expr.power1.FilterApplied(name) && (expr.power2 == nil ||
+		(expr.power2 != nil && expr.power2.FilterApplied(name)))
+}
+
+// GetPositionToken delegates to the first operand so error messages point
+// at the start of the (sub-)expression.
+func (expr *Expression) GetPositionToken() *Token {
+	return expr.expr1.GetPositionToken()
+}
+
+func (expr *relationalExpression) GetPositionToken() *Token {
+	return expr.expr1.GetPositionToken()
+}
+
+func (expr *simpleExpression) GetPositionToken() *Token {
+	return expr.term1.GetPositionToken()
+}
+
+func (expr *term) GetPositionToken() *Token {
+	return expr.factor1.GetPositionToken()
+}
+
+func (expr *power) GetPositionToken() *Token {
+	return expr.power1.GetPositionToken()
+}
+
+// Execute evaluates the expression and writes its string representation
+// to the writer. The same pattern is repeated for every node type below.
+func (expr *Expression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	value, err := expr.Evaluate(ctx)
+	if err != nil {
+		return err
+	}
+	writer.WriteString(value.String())
+	return nil
+}
+
+func (expr *relationalExpression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	value, err := expr.Evaluate(ctx)
+	if err != nil {
+		return err
+	}
+	writer.WriteString(value.String())
+	return nil
+}
+
+func (expr *simpleExpression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	value, err := expr.Evaluate(ctx)
+	if err != nil {
+		return err
+	}
+	writer.WriteString(value.String())
+	return nil
+}
+
+func (expr *term) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	value, err := expr.Evaluate(ctx)
+	if err != nil {
+		return err
+	}
+	writer.WriteString(value.String())
+	return nil
+}
+
+func (expr *power) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	value, err := expr.Evaluate(ctx)
+	if err != nil {
+		return err
+	}
+	writer.WriteString(value.String())
+	return nil
+}
+
+// Evaluate computes the logical combination. Both "and" and "or"
+// short-circuit: the second operand is only evaluated when needed,
+// and the result is always a boolean Value.
+func (expr *Expression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+	v1, err := expr.expr1.Evaluate(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if expr.expr2 != nil {
+		switch expr.opToken.Val {
+		case "and", "&&":
+			if !v1.IsTrue() {
+				return AsValue(false), nil
+			} else {
+				v2, err := expr.expr2.Evaluate(ctx)
+				if err != nil {
+					return nil, err
+				}
+				return AsValue(v2.IsTrue()), nil
+			}
+		case "or", "||":
+			if v1.IsTrue() {
+				return AsValue(true), nil
+			} else {
+				v2, err := expr.expr2.Evaluate(ctx)
+				if err != nil {
+					return nil, err
+				}
+				return AsValue(v2.IsTrue()), nil
+			}
+		default:
+			return nil, ctx.Error(fmt.Sprintf("unimplemented: %s", expr.opToken.Val), expr.opToken)
+		}
+	} else {
+		return v1, nil
+	}
+}
+
+// Evaluate performs the comparison. Ordering operators compare as floats
+// if either side is a float, otherwise as integers; "=="/"!=" use
+// EqualValueTo and "in" uses Contains on the right-hand side.
+func (expr *relationalExpression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+	v1, err := expr.expr1.Evaluate(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if expr.expr2 != nil {
+		v2, err := expr.expr2.Evaluate(ctx)
+		if err != nil {
+			return nil, err
+		}
+		switch expr.opToken.Val {
+		case "<=":
+			if v1.IsFloat() || v2.IsFloat() {
+				return AsValue(v1.Float() <= v2.Float()), nil
+			}
+			return AsValue(v1.Integer() <= v2.Integer()), nil
+		case ">=":
+			if v1.IsFloat() || v2.IsFloat() {
+				return AsValue(v1.Float() >= v2.Float()), nil
+			}
+			return AsValue(v1.Integer() >= v2.Integer()), nil
+		case "==":
+			return AsValue(v1.EqualValueTo(v2)), nil
+		case ">":
+			if v1.IsFloat() || v2.IsFloat() {
+				return AsValue(v1.Float() > v2.Float()), nil
+			}
+			return AsValue(v1.Integer() > v2.Integer()), nil
+		case "<":
+			if v1.IsFloat() || v2.IsFloat() {
+				return AsValue(v1.Float() < v2.Float()), nil
+			}
+			return AsValue(v1.Integer() < v2.Integer()), nil
+		case "!=", "<>":
+			return AsValue(!v1.EqualValueTo(v2)), nil
+		case "in":
+			return AsValue(v2.Contains(v1)), nil
+		default:
+			return nil, ctx.Error(fmt.Sprintf("unimplemented: %s", expr.opToken.Val), expr.opToken)
+		}
+	} else {
+		return v1, nil
+	}
+}
+
+// Evaluate applies boolean negation and/or the negative sign to the first
+// term, then combines with the optional second term via "+" or "-"
+// (float arithmetic if either side is a float, integer otherwise).
+func (expr *simpleExpression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+	t1, err := expr.term1.Evaluate(ctx)
+	if err != nil {
+		return nil, err
+	}
+	result := t1
+
+	if expr.negate {
+		result = result.Negate()
+	}
+
+	if expr.negativeSign {
+		if result.IsNumber() {
+			switch {
+			case result.IsFloat():
+				result = AsValue(-1 * result.Float())
+			case result.IsInteger():
+				result = AsValue(-1 * result.Integer())
+			default:
+				return nil, ctx.Error("Operation between a number and a non-(float/integer) is not possible", nil)
+			}
+		} else {
+			return nil, ctx.Error("Negative sign on a non-number expression", expr.GetPositionToken())
+		}
+	}
+
+	if expr.term2 != nil {
+		t2, err := expr.term2.Evaluate(ctx)
+		if err != nil {
+			return nil, err
+		}
+		switch expr.opToken.Val {
+		case "+":
+			if result.IsFloat() || t2.IsFloat() {
+				// Result will be a float
+				return AsValue(result.Float() + t2.Float()), nil
+			}
+			// Result will be an integer
+			return AsValue(result.Integer() + t2.Integer()), nil
+		case "-":
+			if result.IsFloat() || t2.IsFloat() {
+				// Result will be a float
+				return AsValue(result.Float() - t2.Float()), nil
+			}
+			// Result will be an integer
+			return AsValue(result.Integer() - t2.Integer()), nil
+		default:
+			return nil, ctx.Error("Unimplemented", expr.GetPositionToken())
+		}
+	}
+
+	return result, nil
+}
+
+// Evaluate computes "*", "/" or "%" over the two factors.
+// NOTE(review): the integer "/" and "%" branches perform native Go integer
+// division — a zero right-hand side would panic at render time rather than
+// return a template *Error. Confirm whether upstream guards this elsewhere.
+func (expr *term) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+	f1, err := expr.factor1.Evaluate(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if expr.factor2 != nil {
+		f2, err := expr.factor2.Evaluate(ctx)
+		if err != nil {
+			return nil, err
+		}
+		switch expr.opToken.Val {
+		case "*":
+			if f1.IsFloat() || f2.IsFloat() {
+				// Result will be float
+				return AsValue(f1.Float() * f2.Float()), nil
+			}
+			// Result will be int
+			return AsValue(f1.Integer() * f2.Integer()), nil
+		case "/":
+			if f1.IsFloat() || f2.IsFloat() {
+				// Result will be float
+				return AsValue(f1.Float() / f2.Float()), nil
+			}
+			// Result will be int
+			return AsValue(f1.Integer() / f2.Integer()), nil
+		case "%":
+			// Result will be int
+			return AsValue(f1.Integer() % f2.Integer()), nil
+		default:
+			return nil, ctx.Error("unimplemented", expr.opToken)
+		}
+	} else {
+		return f1, nil
+	}
+}
+
+// Evaluate computes exponentiation via math.Pow; both operands are
+// coerced to float64, so the result is always a float Value.
+func (expr *power) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+	p1, err := expr.power1.Evaluate(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if expr.power2 != nil {
+		p2, err := expr.power2.Evaluate(ctx)
+		if err != nil {
+			return nil, err
+		}
+		return AsValue(math.Pow(p1.Float(), p2.Float())), nil
+	}
+	return p1, nil
+}
+
+// parseFactor parses a parenthesized sub-expression or a single
+// variable/literal with optional filters applied.
+func (p *Parser) parseFactor() (IEvaluator, *Error) {
+	if p.Match(TokenSymbol, "(") != nil {
+		expr, err := p.ParseExpression()
+		if err != nil {
+			return nil, err
+		}
+		if p.Match(TokenSymbol, ")") == nil {
+			return nil, p.Error("Closing bracket expected after expression", nil)
+		}
+		return expr, nil
+	}
+
+	return p.parseVariableOrLiteralWithFilter()
+}
+
+// parsePower parses factor [ "^" power ]; the recursion on the right
+// operand makes "^" right-associative.
+func (p *Parser) parsePower() (IEvaluator, *Error) {
+	pw := new(power)
+
+	power1, err := p.parseFactor()
+	if err != nil {
+		return nil, err
+	}
+	pw.power1 = power1
+
+	if p.Match(TokenSymbol, "^") != nil {
+		power2, err := p.parsePower()
+		if err != nil {
+			return nil, err
+		}
+		pw.power2 = power2
+	}
+
+	if pw.power2 == nil {
+		// Shortcut for faster evaluation
+		return pw.power1, nil
+	}
+
+	return pw, nil
+}
+
+// parseTerm parses power { ("*"|"/"|"%") power }, left-associatively:
+// each additional operator wraps the term built so far as the new factor1.
+func (p *Parser) parseTerm() (IEvaluator, *Error) {
+	returnTerm := new(term)
+
+	factor1, err := p.parsePower()
+	if err != nil {
+		return nil, err
+	}
+	returnTerm.factor1 = factor1
+
+	for p.PeekOne(TokenSymbol, "*", "/", "%") != nil {
+		if returnTerm.opToken != nil {
+			// Create new sub-term
+			returnTerm = &term{
+				factor1: returnTerm,
+			}
+		}
+
+		op := p.Current()
+		p.Consume()
+
+		factor2, err := p.parsePower()
+		if err != nil {
+			return nil, err
+		}
+
+		returnTerm.opToken = op
+		returnTerm.factor2 = factor2
+	}
+
+	if returnTerm.opToken == nil {
+		// Shortcut for faster evaluation
+		return returnTerm.factor1, nil
+	}
+
+	return returnTerm, nil
+}
+
+// parseSimpleExpression parses an optional leading sign and "!"/"not",
+// then term { ("+"|"-") term }, left-associatively.
+func (p *Parser) parseSimpleExpression() (IEvaluator, *Error) {
+	expr := new(simpleExpression)
+
+	if sign := p.MatchOne(TokenSymbol, "+", "-"); sign != nil {
+		if sign.Val == "-" {
+			expr.negativeSign = true
+		}
+	}
+
+	if p.Match(TokenSymbol, "!") != nil || p.Match(TokenKeyword, "not") != nil {
+		expr.negate = true
+	}
+
+	term1, err := p.parseTerm()
+	if err != nil {
+		return nil, err
+	}
+	expr.term1 = term1
+
+	for p.PeekOne(TokenSymbol, "+", "-") != nil {
+		if expr.opToken != nil {
+			// New sub expr
+			expr = &simpleExpression{
+				term1: expr,
+			}
+		}
+
+		op := p.Current()
+		p.Consume()
+
+		term2, err := p.parseTerm()
+		if err != nil {
+			return nil, err
+		}
+
+		expr.term2 = term2
+		expr.opToken = op
+	}
+
+	if expr.negate == false && expr.negativeSign == false && expr.term2 == nil {
+		// Shortcut for faster evaluation
+		return expr.term1, nil
+	}
+
+	return expr, nil
+}
+
+// parseRelationalExpression parses an optional comparison; symbol
+// operators recurse on the right (right-associative), while "in" takes a
+// plain simple expression as its right-hand side.
+func (p *Parser) parseRelationalExpression() (IEvaluator, *Error) {
+	expr1, err := p.parseSimpleExpression()
+	if err != nil {
+		return nil, err
+	}
+
+	expr := &relationalExpression{
+		expr1: expr1,
+	}
+
+	if t := p.MatchOne(TokenSymbol, "==", "<=", ">=", "!=", "<>", ">", "<"); t != nil {
+		expr2, err := p.parseRelationalExpression()
+		if err != nil {
+			return nil, err
+		}
+		expr.opToken = t
+		expr.expr2 = expr2
+	} else if t := p.MatchOne(TokenKeyword, "in"); t != nil {
+		expr2, err := p.parseSimpleExpression()
+		if err != nil {
+			return nil, err
+		}
+		expr.opToken = t
+		expr.expr2 = expr2
+	}
+
+	if expr.expr2 == nil {
+		// Shortcut for faster evaluation
+		return expr.expr1, nil
+	}
+
+	return expr, nil
+}
+
+// ParseExpression is the public entry point of the expression parser:
+// relationalExpression optionally joined by a logical operator, recursing
+// on the right-hand side.
+func (p *Parser) ParseExpression() (IEvaluator, *Error) {
+	rexpr1, err := p.parseRelationalExpression()
+	if err != nil {
+		return nil, err
+	}
+
+	exp := &Expression{
+		expr1: rexpr1,
+	}
+
+	if p.PeekOne(TokenSymbol, "&&", "||") != nil || p.PeekOne(TokenKeyword, "and", "or") != nil {
+		op := p.Current()
+		p.Consume()
+		expr2, err := p.ParseExpression()
+		if err != nil {
+			return nil, err
+		}
+		exp.expr2 = expr2
+		exp.opToken = op
+	}
+
+	if exp.expr2 == nil {
+		// Shortcut for faster evaluation
+		return exp.expr1, nil
+	}
+
+	return exp, nil
+}
diff --git a/vendor/github.com/flosch/pongo2/pongo2.go b/vendor/github.com/flosch/pongo2/pongo2.go
new file mode 100644
index 0000000..eda3aa0
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/pongo2.go
@@ -0,0 +1,14 @@
+package pongo2
+
+// Version string
+const Version = "dev"
+
+// Must panics, if a Template couldn't successfully parsed. This is how you
+// would use it:
+// var baseTemplate = pongo2.Must(pongo2.FromFile("templates/base.html"))
+//
+// It simply returns the template unchanged when err is nil.
+func Must(tpl *Template, err error) *Template {
+	if err != nil {
+		panic(err)
+	}
+	return tpl
+}
diff --git a/vendor/github.com/flosch/pongo2/tags.go b/vendor/github.com/flosch/pongo2/tags.go
new file mode 100644
index 0000000..3668b06
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags.go
@@ -0,0 +1,135 @@
+package pongo2
+
+/* Incomplete:
+   -----------
+
+   verbatim (only the "name" argument is missing for verbatim)
+
+   Reconsideration:
+   ----------------
+
+   debug (reason: not sure what to output yet)
+   regroup / Grouping on other properties (reason: maybe too python-specific; not sure how useful this would be in Go)
+
+   Following built-in tags wont be added:
+   --------------------------------------
+
+   csrf_token (reason: web-framework specific)
+   load (reason: python-specific)
+   url (reason: web-framework specific)
+*/
+
+import (
+	"fmt"
+
+	"github.com/juju/errors"
+)
+
+// INodeTag is the node interface every tag implementation returns; it is
+// currently just an alias for INode.
+type INodeTag interface {
+	INode
+}
+
+// This is the function signature of the tag's parser you will have
+// to implement in order to create a new tag.
+//
+// 'doc' is providing access to the whole document while 'arguments'
+// is providing access to the user's arguments to the tag:
+//
+//     {% your_tag_name some "arguments" 123 %}
+//
+// start_token will be the *Token with the tag's name in it (here: your_tag_name).
+//
+// Please see the Parser documentation on how to use the parser.
+// See RegisterTag()'s documentation for more information about
+// writing a tag as well.
+type TagParser func(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error)
+
+// tag associates a registered tag name with its parser function.
+type tag struct {
+	name   string
+	parser TagParser
+}
+
+// tags is the global registry, populated by the init() functions of the
+// tags_*.go files via RegisterTag.
+var tags map[string]*tag
+
+func init() {
+	tags = make(map[string]*tag)
+}
+
+// Registers a new tag. You usually want to call this
+// function in the tag's init() function:
+// http://golang.org/doc/effective_go.html#init
+//
+// See http://www.florian-schlachter.de/post/pongo2/ for more about
+// writing filters and tags.
+//
+// It returns an error if a tag with the same name is already registered.
+func RegisterTag(name string, parserFn TagParser) error {
+	_, existing := tags[name]
+	if existing {
+		return errors.Errorf("tag with name '%s' is already registered", name)
+	}
+	tags[name] = &tag{
+		name:   name,
+		parser: parserFn,
+	}
+	return nil
+}
+
+// Replaces an already registered tag with a new implementation. Use this
+// function with caution since it allows you to change existing tag behaviour.
+func ReplaceTag(name string, parserFn TagParser) error {
+	_, existing := tags[name]
+	if !existing {
+		return errors.Errorf("tag with name '%s' does not exist (therefore cannot be overridden)", name)
+	}
+	tags[name] = &tag{
+		name:   name,
+		parser: parserFn,
+	}
+	return nil
+}
+
+// Tag = "{%" IDENT ARGS "%}"
+//
+// parseTagElement parses one tag invocation: it resolves the tag name in
+// the registry, enforces sandbox restrictions, collects the argument
+// tokens up to "%}" into a dedicated sub-parser and hands both parsers to
+// the tag's TagParser.
+func (p *Parser) parseTagElement() (INodeTag, *Error) {
+	p.Consume() // consume "{%"
+	tokenName := p.MatchType(TokenIdentifier)
+
+	// Check for identifier
+	if tokenName == nil {
+		return nil, p.Error("Tag name must be an identifier.", nil)
+	}
+
+	// Check for the existing tag
+	tag, exists := tags[tokenName.Val]
+	if !exists {
+		// Does not exists
+		return nil, p.Error(fmt.Sprintf("Tag '%s' not found (or beginning tag not provided)", tokenName.Val), tokenName)
+	}
+
+	// Check sandbox tag restriction
+	if _, isBanned := p.template.set.bannedTags[tokenName.Val]; isBanned {
+		return nil, p.Error(fmt.Sprintf("Usage of tag '%s' is not allowed (sandbox restriction active).", tokenName.Val), tokenName)
+	}
+
+	var argsToken []*Token
+	for p.Peek(TokenSymbol, "%}") == nil && p.Remaining() > 0 {
+		// Add token to args
+		argsToken = append(argsToken, p.Current())
+		p.Consume() // next token
+	}
+
+	// EOF?
+	if p.Remaining() == 0 {
+		return nil, p.Error("Unexpectedly reached EOF, no tag end found.", p.lastToken)
+	}
+
+	p.Match(TokenSymbol, "%}")
+
+	argParser := newParser(p.name, argsToken, p.template)
+	if len(argsToken) == 0 {
+		// This is done to have nice EOF error messages
+		argParser.lastToken = tokenName
+	}
+
+	// Track nesting depth so tags like 'extends' can require root level.
+	p.template.level++
+	defer func() { p.template.level-- }()
+	return tag.parser(p, tokenName, argParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_autoescape.go b/vendor/github.com/flosch/pongo2/tags_autoescape.go
new file mode 100644
index 0000000..590a1db
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_autoescape.go
@@ -0,0 +1,52 @@
+package pongo2
+
+// tagAutoescapeNode implements {% autoescape on|off %} ... {% endautoescape %}:
+// it temporarily overrides the context's Autoescape flag for the wrapped body.
+type tagAutoescapeNode struct {
+	wrapper    *NodeWrapper
+	autoescape bool
+}
+
+// Execute swaps in the node's autoescape setting, renders the body, then
+// restores the previous setting.
+// NOTE(review): on a body error the early return skips the restore, so
+// ctx.Autoescape stays overridden — confirm whether that matters for
+// callers that continue using this context after an error.
+func (node *tagAutoescapeNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	old := ctx.Autoescape
+	ctx.Autoescape = node.autoescape
+
+	err := node.wrapper.Execute(ctx, writer)
+	if err != nil {
+		return err
+	}
+
+	ctx.Autoescape = old
+
+	return nil
+}
+
+// tagAutoescapeParser parses the tag body up to {% endautoescape %} and the
+// single required mode argument ("on" or "off").
+func tagAutoescapeParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	autoescapeNode := &tagAutoescapeNode{}
+
+	wrapper, _, err := doc.WrapUntilTag("endautoescape")
+	if err != nil {
+		return nil, err
+	}
+	autoescapeNode.wrapper = wrapper
+
+	modeToken := arguments.MatchType(TokenIdentifier)
+	if modeToken == nil {
+		return nil, arguments.Error("A mode is required for autoescape-tag.", nil)
+	}
+	if modeToken.Val == "on" {
+		autoescapeNode.autoescape = true
+	} else if modeToken.Val == "off" {
+		autoescapeNode.autoescape = false
+	} else {
+		return nil, arguments.Error("Only 'on' or 'off' is valid as an autoescape-mode.", nil)
+	}
+
+	if arguments.Remaining() > 0 {
+		return nil, arguments.Error("Malformed autoescape-tag arguments.", nil)
+	}
+
+	return autoescapeNode, nil
+}
+
+func init() {
+	RegisterTag("autoescape", tagAutoescapeParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_block.go b/vendor/github.com/flosch/pongo2/tags_block.go
new file mode 100644
index 0000000..86145f3
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_block.go
@@ -0,0 +1,129 @@
+package pongo2
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// tagBlockNode implements {% block name %} ... {% endblock %} with
+// template-inheritance overriding and {{ block.Super }} support.
+type tagBlockNode struct {
+	name string
+}
+
+// getBlockWrappers walks the inheritance chain from tpl down through its
+// children, collecting every definition of this block. The last entry is
+// the most-derived override; earlier entries are its ancestors.
+func (node *tagBlockNode) getBlockWrappers(tpl *Template) []*NodeWrapper {
+	nodeWrappers := make([]*NodeWrapper, 0)
+	var t *NodeWrapper
+
+	for tpl != nil {
+		t = tpl.blocks[node.name]
+		if t != nil {
+			nodeWrappers = append(nodeWrappers, t)
+		}
+		tpl = tpl.child
+	}
+
+	return nodeWrappers
+}
+
+// Execute renders the most-derived definition of the block and exposes the
+// remaining ancestor definitions via the "block" private context entry so
+// the body can call {{ block.Super }}.
+func (node *tagBlockNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	tpl := ctx.template
+	if tpl == nil {
+		panic("internal error: tpl == nil")
+	}
+
+	// Determine the block to execute
+	blockWrappers := node.getBlockWrappers(tpl)
+	lenBlockWrappers := len(blockWrappers)
+
+	if lenBlockWrappers == 0 {
+		return ctx.Error("internal error: len(block_wrappers) == 0 in tagBlockNode.Execute()", nil)
+	}
+
+	blockWrapper := blockWrappers[lenBlockWrappers-1]
+	ctx.Private["block"] = tagBlockInformation{
+		ctx:      ctx,
+		wrappers: blockWrappers[0 : lenBlockWrappers-1],
+	}
+	err := blockWrapper.Execute(ctx, writer)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// tagBlockInformation is the value stored under "block" in the private
+// context; wrappers holds the not-yet-rendered ancestor definitions.
+type tagBlockInformation struct {
+	ctx      *ExecutionContext
+	wrappers []*NodeWrapper
+}
+
+// Super renders the parent template's definition of the current block into
+// a string, making the next ancestor available as block.Super in turn.
+// NOTE(review): a render error is silently swallowed and reported as an
+// empty string — confirm this matches upstream's intent.
+func (t tagBlockInformation) Super() string {
+	lenWrappers := len(t.wrappers)
+
+	if lenWrappers == 0 {
+		return ""
+	}
+
+	superCtx := NewChildExecutionContext(t.ctx)
+	superCtx.Private["block"] = tagBlockInformation{
+		ctx:      t.ctx,
+		wrappers: t.wrappers[0 : lenWrappers-1],
+	}
+
+	blockWrapper := t.wrappers[lenWrappers-1]
+	buf := bytes.NewBufferString("")
+	err := blockWrapper.Execute(superCtx, &templateWriter{buf})
+	if err != nil {
+		return ""
+	}
+	return buf.String()
+}
+
+// tagBlockParser parses "{% block name %} ... {% endblock [name] %}",
+// validates the optional matching name on endblock, and registers the
+// wrapped body under the block name on the template (names must be unique).
+func tagBlockParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	if arguments.Count() == 0 {
+		return nil, arguments.Error("Tag 'block' requires an identifier.", nil)
+	}
+
+	nameToken := arguments.MatchType(TokenIdentifier)
+	if nameToken == nil {
+		return nil, arguments.Error("First argument for tag 'block' must be an identifier.", nil)
+	}
+
+	if arguments.Remaining() != 0 {
+		return nil, arguments.Error("Tag 'block' takes exactly 1 argument (an identifier).", nil)
+	}
+
+	wrapper, endtagargs, err := doc.WrapUntilTag("endblock")
+	if err != nil {
+		return nil, err
+	}
+	if endtagargs.Remaining() > 0 {
+		endtagnameToken := endtagargs.MatchType(TokenIdentifier)
+		if endtagnameToken != nil {
+			if endtagnameToken.Val != nameToken.Val {
+				return nil, endtagargs.Error(fmt.Sprintf("Name for 'endblock' must equal to 'block'-tag's name ('%s' != '%s').",
+					nameToken.Val, endtagnameToken.Val), nil)
+			}
+		}
+
+		if endtagnameToken == nil || endtagargs.Remaining() > 0 {
+			return nil, endtagargs.Error("Either no or only one argument (identifier) allowed for 'endblock'.", nil)
+		}
+	}
+
+	tpl := doc.template
+	if tpl == nil {
+		panic("internal error: tpl == nil")
+	}
+	_, hasBlock := tpl.blocks[nameToken.Val]
+	if !hasBlock {
+		tpl.blocks[nameToken.Val] = wrapper
+	} else {
+		return nil, arguments.Error(fmt.Sprintf("Block named '%s' already defined", nameToken.Val), nil)
+	}
+
+	return &tagBlockNode{name: nameToken.Val}, nil
+}
+
+func init() {
+	RegisterTag("block", tagBlockParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_comment.go b/vendor/github.com/flosch/pongo2/tags_comment.go
new file mode 100644
index 0000000..56a02ed
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_comment.go
@@ -0,0 +1,27 @@
+package pongo2
+
+// tagCommentNode implements {% comment %} ... {% endcomment %}: the body
+// is skipped at parse time, so Execute renders nothing.
+type tagCommentNode struct{}
+
+func (node *tagCommentNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	return nil
+}
+
+// tagCommentParser discards all tokens up to {% endcomment %} and rejects
+// any arguments on the opening tag.
+func tagCommentParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	commentNode := &tagCommentNode{}
+
+	// TODO: Process the endtag's arguments (see django 'comment'-tag documentation)
+	err := doc.SkipUntilTag("endcomment")
+	if err != nil {
+		return nil, err
+	}
+
+	if arguments.Count() != 0 {
+		return nil, arguments.Error("Tag 'comment' does not take any argument.", nil)
+	}
+
+	return commentNode, nil
+}
+
+func init() {
+	RegisterTag("comment", tagCommentParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_cycle.go b/vendor/github.com/flosch/pongo2/tags_cycle.go
new file mode 100644
index 0000000..ffbd254
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_cycle.go
@@ -0,0 +1,106 @@
+package pongo2
+
+// tagCycleValue is stored in the context when "as name" is used; reusing
+// it via {% cycle name %} advances the shared cycle state.
+type tagCycleValue struct {
+	node  *tagCycleNode
+	value *Value
+}
+
+// tagCycleNode implements {% cycle a b c [as name [silent]] %}. idx is the
+// mutable position in the argument list, advanced on every Execute.
+type tagCycleNode struct {
+	position *Token
+	args     []IEvaluator
+	idx      int
+	asName   string
+	silent   bool
+}
+
+func (cv *tagCycleValue) String() string {
+	return cv.value.String()
+}
+
+// Execute emits the next value in the cycle. If the evaluated argument is
+// itself a tagCycleValue (i.e. a named cycle referenced by identifier),
+// that cycle's own state is advanced instead.
+func (node *tagCycleNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	item := node.args[node.idx%len(node.args)]
+	node.idx++
+
+	val, err := item.Evaluate(ctx)
+	if err != nil {
+		return err
+	}
+
+	if t, ok := val.Interface().(*tagCycleValue); ok {
+		// {% cycle "test1" "test2"
+		// {% cycle cycleitem %}
+
+		// Update the cycle value with next value
+		item := t.node.args[t.node.idx%len(t.node.args)]
+		t.node.idx++
+
+		val, err := item.Evaluate(ctx)
+		if err != nil {
+			return err
+		}
+
+		t.value = val
+
+		if !t.node.silent {
+			writer.WriteString(val.String())
+		}
+	} else {
+		// Regular call
+
+		cycleValue := &tagCycleValue{
+			node:  node,
+			value: val,
+		}
+
+		if node.asName != "" {
+			ctx.Private[node.asName] = cycleValue
+		}
+		if !node.silent {
+			writer.WriteString(val.String())
+		}
+	}
+
+	return nil
+}
+
+// HINT: We're not supporting the old comma-separated list of expressions argument-style
+//
+// tagCycleParser collects the cycle's expressions and the optional
+// trailing "as <name> [silent]" clause.
+func tagCycleParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	cycleNode := &tagCycleNode{
+		position: start,
+	}
+
+	for arguments.Remaining() > 0 {
+		node, err := arguments.ParseExpression()
+		if err != nil {
+			return nil, err
+		}
+		cycleNode.args = append(cycleNode.args, node)
+
+		if arguments.MatchOne(TokenKeyword, "as") != nil {
+			// as
+
+			nameToken := arguments.MatchType(TokenIdentifier)
+			if nameToken == nil {
+				return nil, arguments.Error("Name (identifier) expected after 'as'.", nil)
+			}
+			cycleNode.asName = nameToken.Val
+
+			if arguments.MatchOne(TokenIdentifier, "silent") != nil {
+				cycleNode.silent = true
+			}
+
+			// Now we're finished
+			break
+		}
+	}
+
+	if arguments.Remaining() > 0 {
+		return nil, arguments.Error("Malformed cycle-tag.", nil)
+	}
+
+	return cycleNode, nil
+}
+
+func init() {
+	RegisterTag("cycle", tagCycleParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_extends.go b/vendor/github.com/flosch/pongo2/tags_extends.go
new file mode 100644
index 0000000..5771020
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_extends.go
@@ -0,0 +1,52 @@
+package pongo2
+
+// tagExtendsNode implements {% extends "file" %}. All work happens at
+// parse time (loading and linking the parent template); Execute is a no-op.
+type tagExtendsNode struct {
+	filename string
+}
+
+func (node *tagExtendsNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	return nil
+}
+
+// tagExtendsParser enforces that 'extends' appears once, at root level,
+// with a single string filename; it then loads the parent template and
+// links parent/child for block resolution.
+func tagExtendsParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	extendsNode := &tagExtendsNode{}
+
+	if doc.template.level > 1 {
+		return nil, arguments.Error("The 'extends' tag can only defined on root level.", start)
+	}
+
+	if doc.template.parent != nil {
+		// Already one parent
+		return nil, arguments.Error("This template has already one parent.", start)
+	}
+
+	if filenameToken := arguments.MatchType(TokenString); filenameToken != nil {
+		// prepared, static template
+
+		// Get parent's filename
+		parentFilename := doc.template.set.resolveFilename(doc.template, filenameToken.Val)
+
+		// Parse the parent
+		parentTemplate, err := doc.template.set.FromFile(parentFilename)
+		if err != nil {
+			// NOTE(review): this type assertion panics if FromFile ever
+			// returns an error that is not a *Error — confirm FromFile's
+			// error contract.
+			return nil, err.(*Error)
+		}
+
+		// Keep track of things
+		parentTemplate.child = doc.template
+		doc.template.parent = parentTemplate
+		extendsNode.filename = parentFilename
+	} else {
+		return nil, arguments.Error("Tag 'extends' requires a template filename as string.", nil)
+	}
+
+	if arguments.Remaining() > 0 {
+		return nil, arguments.Error("Tag 'extends' does only take 1 argument.", nil)
+	}
+
+	return extendsNode, nil
+}
+
+func init() {
+	RegisterTag("extends", tagExtendsParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_filter.go b/vendor/github.com/flosch/pongo2/tags_filter.go
new file mode 100644
index 0000000..b38fd92
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_filter.go
@@ -0,0 +1,95 @@
+package pongo2
+
+import (
+	"bytes"
+)
+
+// nodeFilterCall is one "name[:param]" entry in a filter chain.
+type nodeFilterCall struct {
+	name      string
+	paramExpr IEvaluator
+}
+
+// tagFilterNode implements {% filter f1|f2:arg %} body {% endfilter %}:
+// the body is rendered to a buffer and then piped through the chain.
+type tagFilterNode struct {
+	position    *Token
+	bodyWrapper *NodeWrapper
+	filterChain []*nodeFilterCall
+}
+
+// Execute renders the wrapped body into a temporary buffer, then applies
+// each filter in order to the accumulated string value.
+func (node *tagFilterNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	temp := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB size
+
+	err := node.bodyWrapper.Execute(ctx, temp)
+	if err != nil {
+		return err
+	}
+
+	value := AsValue(temp.String())
+
+	for _, call := range node.filterChain {
+		var param *Value
+		if call.paramExpr != nil {
+			param, err = call.paramExpr.Evaluate(ctx)
+			if err != nil {
+				return err
+			}
+		} else {
+			param = AsValue(nil)
+		}
+		value, err = ApplyFilter(call.name, value, param)
+		if err != nil {
+			return ctx.Error(err.Error(), node.position)
+		}
+	}
+
+	writer.WriteString(value.String())
+
+	return nil
+}
+
+// tagFilterParser wraps the body up to {% endfilter %} and parses the
+// "|"-separated filter chain from the tag arguments.
+func tagFilterParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	filterNode := &tagFilterNode{
+		position: start,
+	}
+
+	wrapper, _, err := doc.WrapUntilTag("endfilter")
+	if err != nil {
+		return nil, err
+	}
+	filterNode.bodyWrapper = wrapper
+
+	for arguments.Remaining() > 0 {
+		filterCall := &nodeFilterCall{}
+
+		nameToken := arguments.MatchType(TokenIdentifier)
+		if nameToken == nil {
+			return nil, arguments.Error("Expected a filter name (identifier).", nil)
+		}
+		filterCall.name = nameToken.Val
+
+		if arguments.MatchOne(TokenSymbol, ":") != nil {
+			// Filter parameter
+			// NOTICE: we can't use ParseExpression() here, because it would parse the next filter "|..." as well in the argument list
+			expr, err := arguments.parseVariableOrLiteral()
+			if err != nil {
+				return nil, err
+			}
+			filterCall.paramExpr = expr
+		}
+
+		filterNode.filterChain = append(filterNode.filterChain, filterCall)
+
+		if arguments.MatchOne(TokenSymbol, "|") == nil {
+			break
+		}
+	}
+
+	if arguments.Remaining() > 0 {
+		return nil, arguments.Error("Malformed filter-tag arguments.", nil)
+	}
+
+	return filterNode, nil
+}
+
+func init() {
+	RegisterTag("filter", tagFilterParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_firstof.go b/vendor/github.com/flosch/pongo2/tags_firstof.go
new file mode 100644
index 0000000..5b2888e
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_firstof.go
@@ -0,0 +1,49 @@
+package pongo2
+
+// tagFirstofNode implements {% firstof a b c %}: it outputs the first
+// argument that evaluates truthy, or nothing if none do.
+type tagFirstofNode struct {
+	position *Token
+	args     []IEvaluator
+}
+
+// Execute evaluates the arguments in order and writes the first truthy
+// value, escaping it when autoescape is on and no "safe" filter was applied.
+func (node *tagFirstofNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+	for _, arg := range node.args {
+		val, err := arg.Evaluate(ctx)
+		if err != nil {
+			return err
+		}
+
+		if val.IsTrue() {
+			if ctx.Autoescape && !arg.FilterApplied("safe") {
+				val, err = ApplyFilter("escape", val, nil)
+				if err != nil {
+					return err
+				}
+			}
+
+			writer.WriteString(val.String())
+			return nil
+		}
+	}
+
+	return nil
+}
+
+// tagFirstofParser collects all argument expressions (any number allowed).
+func tagFirstofParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+	firstofNode := &tagFirstofNode{
+		position: start,
+	}
+
+	for arguments.Remaining() > 0 {
+		node, err := arguments.ParseExpression()
+		if err != nil {
+			return nil, err
+		}
+		firstofNode.args = append(firstofNode.args, node)
+	}
+
+	return firstofNode, nil
+}
+
+func init() {
+	RegisterTag("firstof", tagFirstofParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_for.go b/vendor/github.com/flosch/pongo2/tags_for.go
new file mode 100644
index 0000000..5b0b555
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_for.go
@@ -0,0 +1,159 @@
+package pongo2
+
// tagForNode is the AST node for a parsed {% for %} tag.
type tagForNode struct {
	key             string     // name of the loop variable (or the key variable for maps)
	value           string     // only for maps: for key, value in map
	objectEvaluator IEvaluator // expression evaluated to obtain the iterable object
	reversed        bool       // "reversed" modifier present
	sorted          bool       // "sorted" modifier present

	bodyWrapper  *NodeWrapper // nodes between {% for %} and {% empty %}/{% endfor %}
	emptyWrapper *NodeWrapper // optional {% empty %} branch (nil when absent)
}
+
// tagForLoopInformation is exposed to templates as "forloop" and mirrors
// Django's forloop object.
type tagForLoopInformation struct {
	Counter     int                    // 1-based iteration counter
	Counter0    int                    // 0-based iteration counter
	Revcounter  int                    // iterations remaining, counting the current one
	Revcounter0 int                    // iterations remaining after the current one
	First       bool                   // true during the first iteration
	Last        bool                   // true during the last iteration
	Parentloop  *tagForLoopInformation // enclosing loop's info; nil at top level
}
+
// Execute renders the loop body once per element of the evaluated object,
// maintaining a Django-style "forloop" object in the private child context.
// It returns the first error produced by the body or by the empty-block.
func (node *tagForNode) Execute(ctx *ExecutionContext, writer TemplateWriter) (forError *Error) {
	// Backup forloop (as parentloop in public context), key-name and value-name
	forCtx := NewChildExecutionContext(ctx)
	parentloop := forCtx.Private["forloop"]

	// Create loop struct
	loopInfo := &tagForLoopInformation{
		First: true,
	}

	// Is it a loop in a loop?
	if parentloop != nil {
		loopInfo.Parentloop = parentloop.(*tagForLoopInformation)
	}

	// Register loopInfo in public context
	forCtx.Private["forloop"] = loopInfo

	obj, err := node.objectEvaluator.Evaluate(forCtx)
	if err != nil {
		return err
	}

	obj.IterateOrder(func(idx, count int, key, value *Value) bool {
		// There's something to iterate over (correct type and at least 1 item)

		// Update loop infos and public context
		forCtx.Private[node.key] = key
		if value != nil {
			forCtx.Private[node.value] = value
		}
		loopInfo.Counter = idx + 1
		loopInfo.Counter0 = idx
		if idx == 1 {
			// Second iteration reached, so the first one is over.
			loopInfo.First = false
		}
		if idx+1 == count {
			loopInfo.Last = true
		}
		loopInfo.Revcounter = count - idx        // TODO: Not sure about this, have to look it up
		loopInfo.Revcounter0 = count - (idx + 1) // TODO: Not sure about this, have to look it up

		// Render elements with updated context
		err := node.bodyWrapper.Execute(forCtx, writer)
		if err != nil {
			forError = err
			// Returning false aborts the iteration early.
			return false
		}
		return true
	}, func() {
		// Nothing to iterate over (maybe wrong type or no items)
		if node.emptyWrapper != nil {
			err := node.emptyWrapper.Execute(forCtx, writer)
			if err != nil {
				forError = err
			}
		}
	}, node.reversed, node.sorted)

	return forError
}
+
+func tagForParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ forNode := &tagForNode{}
+
+ // Arguments parsing
+ var valueToken *Token
+ keyToken := arguments.MatchType(TokenIdentifier)
+ if keyToken == nil {
+ return nil, arguments.Error("Expected an key identifier as first argument for 'for'-tag", nil)
+ }
+
+ if arguments.Match(TokenSymbol, ",") != nil {
+ // Value name is provided
+ valueToken = arguments.MatchType(TokenIdentifier)
+ if valueToken == nil {
+ return nil, arguments.Error("Value name must be an identifier.", nil)
+ }
+ }
+
+ if arguments.Match(TokenKeyword, "in") == nil {
+ return nil, arguments.Error("Expected keyword 'in'.", nil)
+ }
+
+ objectEvaluator, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ forNode.objectEvaluator = objectEvaluator
+ forNode.key = keyToken.Val
+ if valueToken != nil {
+ forNode.value = valueToken.Val
+ }
+
+ if arguments.MatchOne(TokenIdentifier, "reversed") != nil {
+ forNode.reversed = true
+ }
+
+ if arguments.MatchOne(TokenIdentifier, "sorted") != nil {
+ forNode.sorted = true
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed for-loop arguments.", nil)
+ }
+
+ // Body wrapping
+ wrapper, endargs, err := doc.WrapUntilTag("empty", "endfor")
+ if err != nil {
+ return nil, err
+ }
+ forNode.bodyWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+
+ if wrapper.Endtag == "empty" {
+ // if there's an else in the if-statement, we need the else-Block as well
+ wrapper, endargs, err = doc.WrapUntilTag("endfor")
+ if err != nil {
+ return nil, err
+ }
+ forNode.emptyWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+ }
+
+ return forNode, nil
+}
+
// init registers the "for" tag with pongo2's global tag registry at load time.
func init() {
	RegisterTag("for", tagForParser)
}
diff --git a/vendor/github.com/flosch/pongo2/tags_if.go b/vendor/github.com/flosch/pongo2/tags_if.go
new file mode 100644
index 0000000..3eeaf3b
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_if.go
@@ -0,0 +1,76 @@
+package pongo2
+
// tagIfNode is the AST node for an {% if %}/{% elif %}/{% else %} chain.
type tagIfNode struct {
	conditions []IEvaluator   // one condition per if/elif branch
	wrappers   []*NodeWrapper // branch bodies; may hold one extra entry for the else-branch
}
+
+func (node *tagIfNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ for i, condition := range node.conditions {
+ result, err := condition.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ if result.IsTrue() {
+ return node.wrappers[i].Execute(ctx, writer)
+ }
+ // Last condition?
+ if len(node.conditions) == i+1 && len(node.wrappers) > i+1 {
+ return node.wrappers[i+1].Execute(ctx, writer)
+ }
+ }
+ return nil
+}
+
+func tagIfParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ ifNode := &tagIfNode{}
+
+ // Parse first and main IF condition
+ condition, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ ifNode.conditions = append(ifNode.conditions, condition)
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("If-condition is malformed.", nil)
+ }
+
+ // Check the rest
+ for {
+ wrapper, tagArgs, err := doc.WrapUntilTag("elif", "else", "endif")
+ if err != nil {
+ return nil, err
+ }
+ ifNode.wrappers = append(ifNode.wrappers, wrapper)
+
+ if wrapper.Endtag == "elif" {
+ // elif can take a condition
+ condition, err = tagArgs.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ ifNode.conditions = append(ifNode.conditions, condition)
+
+ if tagArgs.Remaining() > 0 {
+ return nil, tagArgs.Error("Elif-condition is malformed.", nil)
+ }
+ } else {
+ if tagArgs.Count() > 0 {
+ // else/endif can't take any conditions
+ return nil, tagArgs.Error("Arguments not allowed here.", nil)
+ }
+ }
+
+ if wrapper.Endtag == "endif" {
+ break
+ }
+ }
+
+ return ifNode, nil
+}
+
// init registers the "if" tag with pongo2's global tag registry at load time.
func init() {
	RegisterTag("if", tagIfParser)
}
diff --git a/vendor/github.com/flosch/pongo2/tags_ifchanged.go b/vendor/github.com/flosch/pongo2/tags_ifchanged.go
new file mode 100644
index 0000000..45296a0
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_ifchanged.go
@@ -0,0 +1,116 @@
+package pongo2
+
+import (
+ "bytes"
+)
+
// tagIfchangedNode is the AST node for {% ifchanged [expr ...] %}. It keeps
// per-node state across executions so changes can be detected between
// consecutive renderings (e.g. across loop iterations).
type tagIfchangedNode struct {
	watchedExpr []IEvaluator // expressions to watch; empty = compare the rendered body
	lastValues  []*Value     // watched values from the previous execution
	lastContent []byte       // previously rendered body (used when watchedExpr is empty)
	thenWrapper *NodeWrapper // body rendered when a change is detected
	elseWrapper *NodeWrapper // optional {% else %} body (may be nil)
}
+
+func (node *tagIfchangedNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ if len(node.watchedExpr) == 0 {
+ // Check against own rendered body
+
+ buf := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB
+ err := node.thenWrapper.Execute(ctx, buf)
+ if err != nil {
+ return err
+ }
+
+ bufBytes := buf.Bytes()
+ if !bytes.Equal(node.lastContent, bufBytes) {
+ // Rendered content changed, output it
+ writer.Write(bufBytes)
+ node.lastContent = bufBytes
+ }
+ } else {
+ nowValues := make([]*Value, 0, len(node.watchedExpr))
+ for _, expr := range node.watchedExpr {
+ val, err := expr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ nowValues = append(nowValues, val)
+ }
+
+ // Compare old to new values now
+ changed := len(node.lastValues) == 0
+
+ for idx, oldVal := range node.lastValues {
+ if !oldVal.EqualValueTo(nowValues[idx]) {
+ changed = true
+ break // we can stop here because ONE value changed
+ }
+ }
+
+ node.lastValues = nowValues
+
+ if changed {
+ // Render thenWrapper
+ err := node.thenWrapper.Execute(ctx, writer)
+ if err != nil {
+ return err
+ }
+ } else {
+ // Render elseWrapper
+ err := node.elseWrapper.Execute(ctx, writer)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func tagIfchangedParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ ifchangedNode := &tagIfchangedNode{}
+
+ for arguments.Remaining() > 0 {
+ // Parse condition
+ expr, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ ifchangedNode.watchedExpr = append(ifchangedNode.watchedExpr, expr)
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Ifchanged-arguments are malformed.", nil)
+ }
+
+ // Wrap then/else-blocks
+ wrapper, endargs, err := doc.WrapUntilTag("else", "endifchanged")
+ if err != nil {
+ return nil, err
+ }
+ ifchangedNode.thenWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+
+ if wrapper.Endtag == "else" {
+ // if there's an else in the if-statement, we need the else-Block as well
+ wrapper, endargs, err = doc.WrapUntilTag("endifchanged")
+ if err != nil {
+ return nil, err
+ }
+ ifchangedNode.elseWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+ }
+
+ return ifchangedNode, nil
+}
+
// init registers the "ifchanged" tag with pongo2's global tag registry at load time.
func init() {
	RegisterTag("ifchanged", tagIfchangedParser)
}
diff --git a/vendor/github.com/flosch/pongo2/tags_ifequal.go b/vendor/github.com/flosch/pongo2/tags_ifequal.go
new file mode 100644
index 0000000..103f1c7
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_ifequal.go
@@ -0,0 +1,78 @@
+package pongo2
+
// tagIfEqualNode is the AST node for {% ifequal a b %} ... {% endifequal %}.
type tagIfEqualNode struct {
	var1, var2  IEvaluator   // the two expressions being compared by value
	thenWrapper *NodeWrapper // body rendered when the values are equal
	elseWrapper *NodeWrapper // optional {% else %} body (may be nil)
}
+
+func (node *tagIfEqualNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ r1, err := node.var1.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ r2, err := node.var2.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ result := r1.EqualValueTo(r2)
+
+ if result {
+ return node.thenWrapper.Execute(ctx, writer)
+ }
+ if node.elseWrapper != nil {
+ return node.elseWrapper.Execute(ctx, writer)
+ }
+ return nil
+}
+
+func tagIfEqualParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ ifequalNode := &tagIfEqualNode{}
+
+ // Parse two expressions
+ var1, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ var2, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ ifequalNode.var1 = var1
+ ifequalNode.var2 = var2
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("ifequal only takes 2 arguments.", nil)
+ }
+
+ // Wrap then/else-blocks
+ wrapper, endargs, err := doc.WrapUntilTag("else", "endifequal")
+ if err != nil {
+ return nil, err
+ }
+ ifequalNode.thenWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+
+ if wrapper.Endtag == "else" {
+ // if there's an else in the if-statement, we need the else-Block as well
+ wrapper, endargs, err = doc.WrapUntilTag("endifequal")
+ if err != nil {
+ return nil, err
+ }
+ ifequalNode.elseWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+ }
+
+ return ifequalNode, nil
+}
+
// init registers the "ifequal" tag with pongo2's global tag registry at load time.
func init() {
	RegisterTag("ifequal", tagIfEqualParser)
}
diff --git a/vendor/github.com/flosch/pongo2/tags_ifnotequal.go b/vendor/github.com/flosch/pongo2/tags_ifnotequal.go
new file mode 100644
index 0000000..0d287d3
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_ifnotequal.go
@@ -0,0 +1,78 @@
+package pongo2
+
// tagIfNotEqualNode is the AST node for {% ifnotequal a b %} ... {% endifnotequal %}.
type tagIfNotEqualNode struct {
	var1, var2  IEvaluator   // the two expressions being compared by value
	thenWrapper *NodeWrapper // body rendered when the values differ
	elseWrapper *NodeWrapper // optional {% else %} body (may be nil)
}
+
+func (node *tagIfNotEqualNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ r1, err := node.var1.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ r2, err := node.var2.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ result := !r1.EqualValueTo(r2)
+
+ if result {
+ return node.thenWrapper.Execute(ctx, writer)
+ }
+ if node.elseWrapper != nil {
+ return node.elseWrapper.Execute(ctx, writer)
+ }
+ return nil
+}
+
+func tagIfNotEqualParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ ifnotequalNode := &tagIfNotEqualNode{}
+
+ // Parse two expressions
+ var1, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ var2, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ ifnotequalNode.var1 = var1
+ ifnotequalNode.var2 = var2
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("ifequal only takes 2 arguments.", nil)
+ }
+
+ // Wrap then/else-blocks
+ wrapper, endargs, err := doc.WrapUntilTag("else", "endifnotequal")
+ if err != nil {
+ return nil, err
+ }
+ ifnotequalNode.thenWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+
+ if wrapper.Endtag == "else" {
+ // if there's an else in the if-statement, we need the else-Block as well
+ wrapper, endargs, err = doc.WrapUntilTag("endifnotequal")
+ if err != nil {
+ return nil, err
+ }
+ ifnotequalNode.elseWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+ }
+
+ return ifnotequalNode, nil
+}
+
// init registers the "ifnotequal" tag with pongo2's global tag registry at load time.
func init() {
	RegisterTag("ifnotequal", tagIfNotEqualParser)
}
diff --git a/vendor/github.com/flosch/pongo2/tags_import.go b/vendor/github.com/flosch/pongo2/tags_import.go
new file mode 100644
index 0000000..7e0d6a0
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_import.go
@@ -0,0 +1,84 @@
+package pongo2
+
+import (
+ "fmt"
+)
+
// tagImportNode is the AST node for {% import "file" macro [as alias], ... %}.
type tagImportNode struct {
	position *Token                   // opening token, kept for error reporting
	filename string                   // resolved path of the imported template
	macros   map[string]*tagMacroNode // alias/name -> macro instance
}
+
+func (node *tagImportNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ for name, macro := range node.macros {
+ func(name string, macro *tagMacroNode) {
+ ctx.Private[name] = func(args ...*Value) *Value {
+ return macro.call(ctx, args...)
+ }
+ }(name, macro)
+ }
+ return nil
+}
+
// tagImportParser parses {% import "file" macro1 [as alias1][, macro2 ...] %}.
// The referenced template is compiled at parse time and the named exported
// macros are resolved immediately; unknown macros are a parse error.
func tagImportParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	importNode := &tagImportNode{
		position: start,
		macros:   make(map[string]*tagMacroNode),
	}

	filenameToken := arguments.MatchType(TokenString)
	if filenameToken == nil {
		return nil, arguments.Error("Import-tag needs a filename as string.", nil)
	}

	importNode.filename = doc.template.set.resolveFilename(doc.template, filenameToken.Val)

	if arguments.Remaining() == 0 {
		return nil, arguments.Error("You must at least specify one macro to import.", nil)
	}

	// Compile the given template
	tpl, err := doc.template.set.FromFile(importNode.filename)
	if err != nil {
		return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, start)
	}

	// Comma-separated list of "name" or "name as alias" items.
	for arguments.Remaining() > 0 {
		macroNameToken := arguments.MatchType(TokenIdentifier)
		if macroNameToken == nil {
			return nil, arguments.Error("Expected macro name (identifier).", nil)
		}

		// Optional alias: "name as alias" registers the macro under "alias".
		asName := macroNameToken.Val
		if arguments.Match(TokenKeyword, "as") != nil {
			aliasToken := arguments.MatchType(TokenIdentifier)
			if aliasToken == nil {
				return nil, arguments.Error("Expected macro alias name (identifier).", nil)
			}
			asName = aliasToken.Val
		}

		// Only macros declared with "export" are visible here.
		macroInstance, has := tpl.exportedMacros[macroNameToken.Val]
		if !has {
			return nil, arguments.Error(fmt.Sprintf("Macro '%s' not found (or not exported) in '%s'.", macroNameToken.Val,
				importNode.filename), macroNameToken)
		}

		importNode.macros[asName] = macroInstance

		if arguments.Remaining() == 0 {
			break
		}

		if arguments.Match(TokenSymbol, ",") == nil {
			return nil, arguments.Error("Expected ','.", nil)
		}
	}

	return importNode, nil
}
+
// init registers the "import" tag with pongo2's global tag registry at load time.
func init() {
	RegisterTag("import", tagImportParser)
}
diff --git a/vendor/github.com/flosch/pongo2/tags_include.go b/vendor/github.com/flosch/pongo2/tags_include.go
new file mode 100644
index 0000000..6d619fd
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_include.go
@@ -0,0 +1,146 @@
+package pongo2
+
// tagIncludeNode is the AST node for the {% include %} tag.
type tagIncludeNode struct {
	tpl               *Template             // pre-parsed template (static-filename mode only)
	filenameEvaluator IEvaluator            // filename expression (lazy mode only)
	lazy              bool                  // true when the filename is evaluated at render time
	only              bool                  // "only" flag: do not inherit the parent context
	filename          string                // resolved filename (static mode)
	withPairs         map[string]IEvaluator // extra context values from "with key=expr"
	ifExists          bool                  // "if_exists" flag: silently skip a missing template
}
+
// Execute renders the included template into writer. In static mode the
// pre-parsed template is executed directly; in lazy mode the filename
// expression is evaluated first and the template is loaded at render time.
func (node *tagIncludeNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	// Building the context for the template
	includeCtx := make(Context)

	// Fill the context with all data from the parent
	if !node.only {
		includeCtx.Update(ctx.Public)
		includeCtx.Update(ctx.Private)
	}

	// Put all custom with-pairs into the context
	for key, value := range node.withPairs {
		val, err := value.Evaluate(ctx)
		if err != nil {
			return err
		}
		includeCtx[key] = val
	}

	// Execute the template
	if node.lazy {
		// Evaluate the filename
		filename, err := node.filenameEvaluator.Evaluate(ctx)
		if err != nil {
			return err
		}

		if filename.String() == "" {
			return ctx.Error("Filename for 'include'-tag evaluated to an empty string.", nil)
		}

		// Get include-filename
		includedFilename := ctx.template.set.resolveFilename(ctx.template, filename.String())

		includedTpl, err2 := ctx.template.set.FromFile(includedFilename)
		if err2 != nil {
			// if this is ReadFile error, and "if_exists" flag is enabled
			if node.ifExists && err2.(*Error).Sender == "fromfile" {
				// Missing template is tolerated: render nothing.
				return nil
			}
			return err2.(*Error)
		}
		err2 = includedTpl.ExecuteWriter(includeCtx, writer)
		if err2 != nil {
			return err2.(*Error)
		}
		return nil
	}
	// Template is already parsed with static filename
	err := node.tpl.ExecuteWriter(includeCtx, writer)
	if err != nil {
		return err.(*Error)
	}
	return nil
}
+
// tagIncludeEmptyNode is a no-op node substituted for an {% include %} whose
// target file is missing while the "if_exists" flag is set.
type tagIncludeEmptyNode struct{}

// Execute renders nothing.
func (node *tagIncludeEmptyNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	return nil
}
+
// tagIncludeParser parses
// {% include "file"|expr [if_exists] [with key=expr ...] [only] %}.
// A string filename is resolved and compiled at parse time; any other
// expression switches the node into lazy (render-time) mode.
func tagIncludeParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	includeNode := &tagIncludeNode{
		withPairs: make(map[string]IEvaluator),
	}

	if filenameToken := arguments.MatchType(TokenString); filenameToken != nil {
		// prepared, static template

		// "if_exists" flag
		ifExists := arguments.Match(TokenIdentifier, "if_exists") != nil

		// Get include-filename
		includedFilename := doc.template.set.resolveFilename(doc.template, filenameToken.Val)

		// Parse the parent
		includeNode.filename = includedFilename
		includedTpl, err := doc.template.set.FromFile(includedFilename)
		if err != nil {
			// if this is ReadFile error, and "if_exists" token presents we should create and empty node
			if err.(*Error).Sender == "fromfile" && ifExists {
				return &tagIncludeEmptyNode{}, nil
			}
			return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, filenameToken)
		}
		includeNode.tpl = includedTpl
	} else {
		// No String, then the user wants to use lazy-evaluation (slower, but possible)
		filenameEvaluator, err := arguments.ParseExpression()
		if err != nil {
			// NOTE(review): filenameToken is always nil in this branch;
			// presumably updateFromTokenIfNeeded tolerates a nil token — verify.
			return nil, err.updateFromTokenIfNeeded(doc.template, filenameToken)
		}
		includeNode.filenameEvaluator = filenameEvaluator
		includeNode.lazy = true
		includeNode.ifExists = arguments.Match(TokenIdentifier, "if_exists") != nil // "if_exists" flag
	}

	// After having parsed the filename we're gonna parse the with+only options
	if arguments.Match(TokenIdentifier, "with") != nil {
		for arguments.Remaining() > 0 {
			// We have at least one key=expr pair (because of starting "with")
			keyToken := arguments.MatchType(TokenIdentifier)
			if keyToken == nil {
				return nil, arguments.Error("Expected an identifier", nil)
			}
			if arguments.Match(TokenSymbol, "=") == nil {
				return nil, arguments.Error("Expected '='.", nil)
			}
			valueExpr, err := arguments.ParseExpression()
			if err != nil {
				return nil, err.updateFromTokenIfNeeded(doc.template, keyToken)
			}

			includeNode.withPairs[keyToken.Val] = valueExpr

			// Only?
			if arguments.Match(TokenIdentifier, "only") != nil {
				includeNode.only = true
				break // stop parsing arguments because it's the last option
			}
		}
	}

	if arguments.Remaining() > 0 {
		return nil, arguments.Error("Malformed 'include'-tag arguments.", nil)
	}

	return includeNode, nil
}
+
// init registers the "include" tag with pongo2's global tag registry at load time.
func init() {
	RegisterTag("include", tagIncludeParser)
}
diff --git a/vendor/github.com/flosch/pongo2/tags_lorem.go b/vendor/github.com/flosch/pongo2/tags_lorem.go
new file mode 100644
index 0000000..1d353f2
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_lorem.go
@@ -0,0 +1,133 @@
+package pongo2
+
+import (
+ "math/rand"
+ "strings"
+ "time"
+
+ "github.com/juju/errors"
+)
+
// Pre-split views of the lorem-ipsum corpus (tagLoremText): one entry per
// paragraph and one per word.
var (
	tagLoremParagraphs = strings.Split(tagLoremText, "\n")
	tagLoremWords      = strings.Fields(tagLoremText)
)
+
// tagLoremNode is the AST node for the {% lorem %} placeholder-text tag.
type tagLoremNode struct {
	position *Token
	count    int    // number of paragraphs
	method   string // w = words, p = HTML paragraphs, b = plain-text (default is b)
	random   bool   // does not use the default paragraph "Lorem ipsum dolor sit amet, ..."
}
+
+func (node *tagLoremNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ switch node.method {
+ case "b":
+ if node.random {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString("\n")
+ }
+ par := tagLoremParagraphs[rand.Intn(len(tagLoremParagraphs))]
+ writer.WriteString(par)
+ }
+ } else {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString("\n")
+ }
+ par := tagLoremParagraphs[i%len(tagLoremParagraphs)]
+ writer.WriteString(par)
+ }
+ }
+ case "w":
+ if node.random {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString(" ")
+ }
+ word := tagLoremWords[rand.Intn(len(tagLoremWords))]
+ writer.WriteString(word)
+ }
+ } else {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString(" ")
+ }
+ word := tagLoremWords[i%len(tagLoremWords)]
+ writer.WriteString(word)
+ }
+ }
+ case "p":
+ if node.random {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString("\n")
+ }
+ writer.WriteString("
")
+ par := tagLoremParagraphs[rand.Intn(len(tagLoremParagraphs))]
+ writer.WriteString(par)
+ writer.WriteString("
")
+ }
+ } else {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString("\n")
+ }
+ writer.WriteString("
")
+ par := tagLoremParagraphs[i%len(tagLoremParagraphs)]
+ writer.WriteString(par)
+ writer.WriteString("
")
+
+ }
+ }
+ default:
+ return ctx.OrigError(errors.Errorf("unsupported method: %s", node.method), nil)
+ }
+
+ return nil
+}
+
// tagLoremParser parses {% lorem [count] [method] [random] %} where method is
// "w" (words), "p" (HTML paragraphs) or "b" (plain-text blocks, the default).
func tagLoremParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	loremNode := &tagLoremNode{
		position: start,
		count:    1,
		method:   "b",
	}

	// Optional unit count (defaults to 1).
	if countToken := arguments.MatchType(TokenNumber); countToken != nil {
		loremNode.count = AsValue(countToken.Val).Integer()
	}

	// Optional method identifier.
	if methodToken := arguments.MatchType(TokenIdentifier); methodToken != nil {
		if methodToken.Val != "w" && methodToken.Val != "p" && methodToken.Val != "b" {
			return nil, arguments.Error("lorem-method must be either 'w', 'p' or 'b'.", nil)
		}

		loremNode.method = methodToken.Val
	}

	// Optional "random" flag.
	if arguments.MatchOne(TokenIdentifier, "random") != nil {
		loremNode.random = true
	}

	if arguments.Remaining() > 0 {
		return nil, arguments.Error("Malformed lorem-tag arguments.", nil)
	}

	return loremNode, nil
}
+
// init seeds the global math/rand source (used by the "random" flag) and
// registers the "lorem" tag.
func init() {
	rand.Seed(time.Now().Unix())

	RegisterTag("lorem", tagLoremParser)
}
+
+const tagLoremText = `Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
+Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat.
+Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat. Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi.
+Nam liber tempor cum soluta nobis eleifend option congue nihil imperdiet doming id quod mazim placerat facer possim assum. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat.
+Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis.
+At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, At accusam aliquyam diam diam dolore dolores duo eirmod eos erat, et nonumy sed tempor et et invidunt justo labore Stet clita ea et gubergren, kasd magna no rebum. sanctus sea sed takimata ut vero voluptua. est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat.
+Consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.`
diff --git a/vendor/github.com/flosch/pongo2/tags_macro.go b/vendor/github.com/flosch/pongo2/tags_macro.go
new file mode 100644
index 0000000..dd3e0bf
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_macro.go
@@ -0,0 +1,149 @@
+package pongo2
+
+import (
+ "bytes"
+ "fmt"
+)
+
// tagMacroNode is the AST node for {% macro name(args...) [export] %}.
type tagMacroNode struct {
	position  *Token                // opening token, kept for error reporting
	name      string                // macro name as declared
	argsOrder []string              // argument names in declaration order
	args      map[string]IEvaluator // arg name -> default expression (nil = no default)
	exported  bool                  // "export" modifier: reachable via {% import %}

	wrapper *NodeWrapper // macro body
}
+
+func (node *tagMacroNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ ctx.Private[node.name] = func(args ...*Value) *Value {
+ return node.call(ctx, args...)
+ }
+
+ return nil
+}
+
// call invokes the macro with the given positional arguments and returns the
// rendered body as a safe value. Errors cannot be propagated from here, so
// they are logged and their text is returned as the value instead.
func (node *tagMacroNode) call(ctx *ExecutionContext, args ...*Value) *Value {
	argsCtx := make(Context)

	// Pre-fill every declared argument with its default value (or nil).
	for k, v := range node.args {
		if v == nil {
			// User did not provided a default value
			argsCtx[k] = nil
		} else {
			// Evaluate the default value
			valueExpr, err := v.Evaluate(ctx)
			if err != nil {
				ctx.Logf(err.Error())
				return AsSafeValue(err.Error())
			}

			argsCtx[k] = valueExpr
		}
	}

	if len(args) > len(node.argsOrder) {
		// Too many arguments, we're ignoring them and just logging into debug mode.
		err := ctx.Error(fmt.Sprintf("Macro '%s' called with too many arguments (%d instead of %d).",
			node.name, len(args), len(node.argsOrder)), nil).updateFromTokenIfNeeded(ctx.template, node.position)

		ctx.Logf(err.Error()) // TODO: This is a workaround, because the error is not returned yet to the Execution()-methods
		return AsSafeValue(err.Error())
	}

	// Make a context for the macro execution
	macroCtx := NewChildExecutionContext(ctx)

	// Register all arguments in the private context
	macroCtx.Private.Update(argsCtx)

	// Positional arguments override the defaults, in declaration order.
	for idx, argValue := range args {
		macroCtx.Private[node.argsOrder[idx]] = argValue.Interface()
	}

	var b bytes.Buffer
	err := node.wrapper.Execute(macroCtx, &b)
	if err != nil {
		return AsSafeValue(err.updateFromTokenIfNeeded(ctx.template, node.position).Error())
	}

	return AsSafeValue(b.String())
}
+
// tagMacroParser parses
// {% macro name(arg1[, arg2=default, ...]) [export] %} ... {% endmacro %}.
// Exported macros are additionally registered on the template so that
// {% import %} can find them.
func tagMacroParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	macroNode := &tagMacroNode{
		position: start,
		args:     make(map[string]IEvaluator),
	}

	nameToken := arguments.MatchType(TokenIdentifier)
	if nameToken == nil {
		return nil, arguments.Error("Macro-tag needs at least an identifier as name.", nil)
	}
	macroNode.name = nameToken.Val

	if arguments.MatchOne(TokenSymbol, "(") == nil {
		return nil, arguments.Error("Expected '('.", nil)
	}

	// Argument list: identifier [= default-expression], comma-separated,
	// closed by ')'.
	for arguments.Match(TokenSymbol, ")") == nil {
		argNameToken := arguments.MatchType(TokenIdentifier)
		if argNameToken == nil {
			return nil, arguments.Error("Expected argument name as identifier.", nil)
		}
		macroNode.argsOrder = append(macroNode.argsOrder, argNameToken.Val)

		if arguments.Match(TokenSymbol, "=") != nil {
			// Default expression follows
			argDefaultExpr, err := arguments.ParseExpression()
			if err != nil {
				return nil, err
			}
			macroNode.args[argNameToken.Val] = argDefaultExpr
		} else {
			// No default expression
			macroNode.args[argNameToken.Val] = nil
		}

		if arguments.Match(TokenSymbol, ")") != nil {
			break
		}
		if arguments.Match(TokenSymbol, ",") == nil {
			return nil, arguments.Error("Expected ',' or ')'.", nil)
		}
	}

	// Optional "export" keyword.
	if arguments.Match(TokenKeyword, "export") != nil {
		macroNode.exported = true
	}

	if arguments.Remaining() > 0 {
		return nil, arguments.Error("Malformed macro-tag.", nil)
	}

	// Body wrapping
	wrapper, endargs, err := doc.WrapUntilTag("endmacro")
	if err != nil {
		return nil, err
	}
	macroNode.wrapper = wrapper

	if endargs.Count() > 0 {
		return nil, endargs.Error("Arguments not allowed here.", nil)
	}

	if macroNode.exported {
		// Now register the macro if it wants to be exported
		_, has := doc.template.exportedMacros[macroNode.name]
		if has {
			return nil, doc.Error(fmt.Sprintf("another macro with name '%s' already exported", macroNode.name), start)
		}
		doc.template.exportedMacros[macroNode.name] = macroNode
	}

	return macroNode, nil
}
+
// init registers the "macro" tag with pongo2's global tag registry at load time.
func init() {
	RegisterTag("macro", tagMacroParser)
}
diff --git a/vendor/github.com/flosch/pongo2/tags_now.go b/vendor/github.com/flosch/pongo2/tags_now.go
new file mode 100644
index 0000000..d9fa4a3
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_now.go
@@ -0,0 +1,50 @@
+package pongo2
+
+import (
+ "time"
+)
+
// tagNowNode is the AST node for {% now "format" [fake] %}.
type tagNowNode struct {
	position *Token
	format   string // Go reference-time layout used for formatting
	fake     bool   // use a fixed timestamp instead of time.Now()
}
+
+func (node *tagNowNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ var t time.Time
+ if node.fake {
+ t = time.Date(2014, time.February, 05, 18, 31, 45, 00, time.UTC)
+ } else {
+ t = time.Now()
+ }
+
+ writer.WriteString(t.Format(node.format))
+
+ return nil
+}
+
+func tagNowParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ nowNode := &tagNowNode{
+ position: start,
+ }
+
+ formatToken := arguments.MatchType(TokenString)
+ if formatToken == nil {
+ return nil, arguments.Error("Expected a format string.", nil)
+ }
+ nowNode.format = formatToken.Val
+
+ if arguments.MatchOne(TokenIdentifier, "fake") != nil {
+ nowNode.fake = true
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed now-tag arguments.", nil)
+ }
+
+ return nowNode, nil
+}
+
// init registers the "now" tag with pongo2's global tag registry at load time.
func init() {
	RegisterTag("now", tagNowParser)
}
diff --git a/vendor/github.com/flosch/pongo2/tags_set.go b/vendor/github.com/flosch/pongo2/tags_set.go
new file mode 100644
index 0000000..be121c1
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_set.go
@@ -0,0 +1,50 @@
+package pongo2
+
+type tagSetNode struct {
+ name string
+ expression IEvaluator
+}
+
+func (node *tagSetNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ // Evaluate expression
+ value, err := node.expression.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ ctx.Private[node.name] = value
+ return nil
+}
+
+func tagSetParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ node := &tagSetNode{}
+
+ // Parse variable name
+ typeToken := arguments.MatchType(TokenIdentifier)
+ if typeToken == nil {
+ return nil, arguments.Error("Expected an identifier.", nil)
+ }
+ node.name = typeToken.Val
+
+ if arguments.Match(TokenSymbol, "=") == nil {
+ return nil, arguments.Error("Expected '='.", nil)
+ }
+
+ // Variable expression
+ keyExpression, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ node.expression = keyExpression
+
+ // Remaining arguments
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed 'set'-tag arguments.", nil)
+ }
+
+ return node, nil
+}
+
+func init() {
+ RegisterTag("set", tagSetParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_spaceless.go b/vendor/github.com/flosch/pongo2/tags_spaceless.go
new file mode 100644
index 0000000..4fa851b
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_spaceless.go
@@ -0,0 +1,54 @@
+package pongo2
+
+import (
+ "bytes"
+ "regexp"
+)
+
+type tagSpacelessNode struct {
+ wrapper *NodeWrapper
+}
+
+var tagSpacelessRegexp = regexp.MustCompile(`(?U:(<.*>))([\t\n\v\f\r ]+)(?U:(<.*>))`)
+
+func (node *tagSpacelessNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ b := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB
+
+ err := node.wrapper.Execute(ctx, b)
+ if err != nil {
+ return err
+ }
+
+ s := b.String()
+ // Repeat this recursively
+ changed := true
+ for changed {
+ s2 := tagSpacelessRegexp.ReplaceAllString(s, "$1$3")
+ changed = s != s2
+ s = s2
+ }
+
+ writer.WriteString(s)
+
+ return nil
+}
+
+func tagSpacelessParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ spacelessNode := &tagSpacelessNode{}
+
+ wrapper, _, err := doc.WrapUntilTag("endspaceless")
+ if err != nil {
+ return nil, err
+ }
+ spacelessNode.wrapper = wrapper
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed spaceless-tag arguments.", nil)
+ }
+
+ return spacelessNode, nil
+}
+
+func init() {
+ RegisterTag("spaceless", tagSpacelessParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_ssi.go b/vendor/github.com/flosch/pongo2/tags_ssi.go
new file mode 100644
index 0000000..c33858d
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_ssi.go
@@ -0,0 +1,68 @@
package pongo2

import (
	"io/ioutil"
)

// tagSSINode implements `{% ssi "<file>" [parsed] %}`. With "parsed" the file
// is compiled as a template at parse time; otherwise its raw content is read
// once at parse time and emitted verbatim on every render.
type tagSSINode struct {
	filename string
	content  string
	template *Template
}

// Execute renders the included template (parsed mode) within a merged copy of
// the current public+private context, or writes the pre-loaded plaintext.
func (node *tagSSINode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	if node.template != nil {
		// Execute the template within the current context
		includeCtx := make(Context)
		includeCtx.Update(ctx.Public)
		includeCtx.Update(ctx.Private)

		err := node.template.execute(includeCtx, writer)
		if err != nil {
			// NOTE(review): assumes execute() only ever produces *Error
			// values; the assertion panics otherwise — confirm against
			// Template.execute.
			return err.(*Error)
		}
	} else {
		// Just print out the content
		writer.WriteString(node.content)
	}
	return nil
}

// tagSSIParser reads the referenced file at parse time: in "parsed" mode the
// file is compiled through the owning template set; otherwise it is loaded
// as-is via ioutil.ReadFile.
func tagSSIParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	SSINode := &tagSSINode{}

	if fileToken := arguments.MatchType(TokenString); fileToken != nil {
		SSINode.filename = fileToken.Val

		if arguments.Match(TokenIdentifier, "parsed") != nil {
			// parsed
			temporaryTpl, err := doc.template.set.FromFile(doc.template.set.resolveFilename(doc.template, fileToken.Val))
			if err != nil {
				return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, fileToken)
			}
			SSINode.template = temporaryTpl
		} else {
			// plaintext
			buf, err := ioutil.ReadFile(doc.template.set.resolveFilename(doc.template, fileToken.Val))
			if err != nil {
				return nil, (&Error{
					Sender:    "tag:ssi",
					OrigError: err,
				}).updateFromTokenIfNeeded(doc.template, fileToken)
			}
			SSINode.content = string(buf)
		}
	} else {
		return nil, arguments.Error("First argument must be a string.", nil)
	}

	if arguments.Remaining() > 0 {
		return nil, arguments.Error("Malformed SSI-tag argument.", nil)
	}

	return SSINode, nil
}

func init() {
	RegisterTag("ssi", tagSSIParser)
}
diff --git a/vendor/github.com/flosch/pongo2/tags_templatetag.go b/vendor/github.com/flosch/pongo2/tags_templatetag.go
new file mode 100644
index 0000000..164b4dc
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_templatetag.go
@@ -0,0 +1,45 @@
+package pongo2
+
+type tagTemplateTagNode struct {
+ content string
+}
+
+var templateTagMapping = map[string]string{
+ "openblock": "{%",
+ "closeblock": "%}",
+ "openvariable": "{{",
+ "closevariable": "}}",
+ "openbrace": "{",
+ "closebrace": "}",
+ "opencomment": "{#",
+ "closecomment": "#}",
+}
+
+func (node *tagTemplateTagNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ writer.WriteString(node.content)
+ return nil
+}
+
+func tagTemplateTagParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ ttNode := &tagTemplateTagNode{}
+
+ if argToken := arguments.MatchType(TokenIdentifier); argToken != nil {
+ output, found := templateTagMapping[argToken.Val]
+ if !found {
+ return nil, arguments.Error("Argument not found", argToken)
+ }
+ ttNode.content = output
+ } else {
+ return nil, arguments.Error("Identifier expected.", nil)
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed templatetag-tag argument.", nil)
+ }
+
+ return ttNode, nil
+}
+
+func init() {
+ RegisterTag("templatetag", tagTemplateTagParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_widthratio.go b/vendor/github.com/flosch/pongo2/tags_widthratio.go
new file mode 100644
index 0000000..70c9c3e
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_widthratio.go
@@ -0,0 +1,83 @@
package pongo2

import (
	"fmt"
	"math"
)

// tagWidthratioNode implements `{% widthratio current max width [as name] %}`:
// it scales current/max to the given width (Django's widthratio tag).
type tagWidthratioNode struct {
	position     *Token
	current, max IEvaluator
	width        IEvaluator
	ctxName      string // non-empty when the result is bound via "as" instead of printed
}

// Execute evaluates the three expressions, computes the scaled ratio and
// either writes it as a decimal string or stores it in the private context.
func (node *tagWidthratioNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	current, err := node.current.Evaluate(ctx)
	if err != nil {
		return err
	}

	max, err := node.max.Evaluate(ctx)
	if err != nil {
		return err
	}

	width, err := node.width.Evaluate(ctx)
	if err != nil {
		return err
	}

	// NOTE(review): if max evaluates to 0 the division yields ±Inf/NaN and
	// the int conversion of that is not well-defined — confirm callers never
	// pass max == 0. Also, Ceil(x+0.5) bumps exact integer ratios up by one
	// (e.g. ratio 1.0 -> 2); Django-style half-up rounding would use
	// Floor(x+0.5) — confirm this is intended before changing.
	value := int(math.Ceil(current.Float()/max.Float()*width.Float() + 0.5))

	if node.ctxName == "" {
		writer.WriteString(fmt.Sprintf("%d", value))
	} else {
		ctx.Private[node.ctxName] = value
	}

	return nil
}

// tagWidthratioParser parses the three required expressions followed by an
// optional `as <identifier>` suffix.
func tagWidthratioParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	widthratioNode := &tagWidthratioNode{
		position: start,
	}

	current, err := arguments.ParseExpression()
	if err != nil {
		return nil, err
	}
	widthratioNode.current = current

	max, err := arguments.ParseExpression()
	if err != nil {
		return nil, err
	}
	widthratioNode.max = max

	width, err := arguments.ParseExpression()
	if err != nil {
		return nil, err
	}
	widthratioNode.width = width

	if arguments.MatchOne(TokenKeyword, "as") != nil {
		// Name follows
		nameToken := arguments.MatchType(TokenIdentifier)
		if nameToken == nil {
			return nil, arguments.Error("Expected name (identifier).", nil)
		}
		widthratioNode.ctxName = nameToken.Val
	}

	if arguments.Remaining() > 0 {
		return nil, arguments.Error("Malformed widthratio-tag arguments.", nil)
	}

	return widthratioNode, nil
}

func init() {
	RegisterTag("widthratio", tagWidthratioParser)
}
diff --git a/vendor/github.com/flosch/pongo2/tags_with.go b/vendor/github.com/flosch/pongo2/tags_with.go
new file mode 100644
index 0000000..32b3c1c
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_with.go
@@ -0,0 +1,88 @@
package pongo2

// tagWithNode implements `{% with %}…{% endwith %}`: it evaluates a set of
// key/expression pairs and exposes them as variables inside the wrapped block.
type tagWithNode struct {
	withPairs map[string]IEvaluator
	wrapper   *NodeWrapper
}

// Execute evaluates every with-pair against the *outer* context and binds the
// results in a fresh child context before running the wrapped nodes.
func (node *tagWithNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	//new context for block
	withctx := NewChildExecutionContext(ctx)

	// Put all custom with-pairs into the context
	for key, value := range node.withPairs {
		// Evaluated against the parent ctx, so pairs cannot see each other;
		// only the wrapped block sees the new bindings.
		val, err := value.Evaluate(ctx)
		if err != nil {
			return err
		}
		withctx.Private[key] = val
	}

	return node.wrapper.Execute(withctx, writer)
}

// tagWithParser parses both accepted syntaxes:
//
//	old style: {% with expr as name expr2 as name2 %}
//	new style: {% with name=expr name2=expr2 %}
//
// The presence of any "as" keyword anywhere in the arguments selects the old
// style for all pairs.
func tagWithParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	withNode := &tagWithNode{
		withPairs: make(map[string]IEvaluator),
	}

	if arguments.Count() == 0 {
		return nil, arguments.Error("Tag 'with' requires at least one argument.", nil)
	}

	wrapper, endargs, err := doc.WrapUntilTag("endwith")
	if err != nil {
		return nil, err
	}
	withNode.wrapper = wrapper

	if endargs.Count() > 0 {
		return nil, endargs.Error("Arguments not allowed here.", nil)
	}

	// Scan through all arguments to see which style the user uses (old or new style).
	// If we find any "as" keyword we will enforce old style; otherwise we will use new style.
	oldStyle := false // by default we're using the new_style
	for i := 0; i < arguments.Count(); i++ {
		if arguments.PeekN(i, TokenKeyword, "as") != nil {
			oldStyle = true
			break
		}
	}

	for arguments.Remaining() > 0 {
		if oldStyle {
			valueExpr, err := arguments.ParseExpression()
			if err != nil {
				return nil, err
			}
			if arguments.Match(TokenKeyword, "as") == nil {
				return nil, arguments.Error("Expected 'as' keyword.", nil)
			}
			keyToken := arguments.MatchType(TokenIdentifier)
			if keyToken == nil {
				return nil, arguments.Error("Expected an identifier", nil)
			}
			withNode.withPairs[keyToken.Val] = valueExpr
		} else {
			keyToken := arguments.MatchType(TokenIdentifier)
			if keyToken == nil {
				return nil, arguments.Error("Expected an identifier", nil)
			}
			if arguments.Match(TokenSymbol, "=") == nil {
				return nil, arguments.Error("Expected '='.", nil)
			}
			valueExpr, err := arguments.ParseExpression()
			if err != nil {
				return nil, err
			}
			withNode.withPairs[keyToken.Val] = valueExpr
		}
	}

	return withNode, nil
}

func init() {
	RegisterTag("with", tagWithParser)
}
diff --git a/vendor/github.com/flosch/pongo2/template.go b/vendor/github.com/flosch/pongo2/template.go
new file mode 100644
index 0000000..fbe2106
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/template.go
@@ -0,0 +1,277 @@
+package pongo2
+
+import (
+ "bytes"
+ "io"
+ "strings"
+
+ "github.com/juju/errors"
+)
+
+type TemplateWriter interface {
+ io.Writer
+ WriteString(string) (int, error)
+}
+
+type templateWriter struct {
+ w io.Writer
+}
+
+func (tw *templateWriter) WriteString(s string) (int, error) {
+ return tw.w.Write([]byte(s))
+}
+
+func (tw *templateWriter) Write(b []byte) (int, error) {
+ return tw.w.Write(b)
+}
+
// Template is a single compiled template, bound to the TemplateSet that
// created it (see newTemplate) and executed via the Execute* methods.
type Template struct {
	set *TemplateSet

	// Input
	isTplString bool   // true when compiled from an in-memory string (skips path resolution)
	name        string // source file name; "" for string templates
	tpl         string // raw template source
	size        int    // len(tpl); used to pre-size output buffers
	
	// Calculation
	tokens []*Token
	parser *Parser

	// first come, first serve (it's important to not override existing entries in here)
	level          int
	parent         *Template // parent in the inheritance chain; walked up to the root on execution
	child          *Template
	blocks         map[string]*NodeWrapper
	exportedMacros map[string]*tagMacroNode

	// Output
	root *nodeDocument

	// Options allow you to change the behavior of template-engine.
	// You can change the options before calling the Execute method.
	Options *Options
}
+
// newTemplateString compiles an anonymous (string-based) template within the
// given set; the empty name plus isTplString=true marks it so path resolution
// is skipped for includes/imports.
func newTemplateString(set *TemplateSet, tpl []byte) (*Template, error) {
	return newTemplate(set, "", true, tpl)
}
+
// newTemplate tokenizes and parses tpl, returning a ready-to-execute Template
// bound to the given set. isTplString marks templates created from in-memory
// strings. Options are copied from the set so later per-template changes do
// not affect the set.
func newTemplate(set *TemplateSet, name string, isTplString bool, tpl []byte) (*Template, error) {
	strTpl := string(tpl)

	// Create the template
	t := &Template{
		set:            set,
		isTplString:    isTplString,
		name:           name,
		tpl:            strTpl,
		size:           len(strTpl),
		blocks:         make(map[string]*NodeWrapper),
		exportedMacros: make(map[string]*tagMacroNode),
		Options:        newOptions(),
	}
	// Copy all settings from another Options.
	t.Options.Update(set.Options)

	// Tokenize it
	tokens, err := lex(name, strTpl)
	if err != nil {
		return nil, err
	}
	t.tokens = tokens

	// For debugging purposes, show all tokens:
	/*for i, t := range tokens {
		fmt.Printf("%3d. %s\n", i, t)
	}*/

	// Parse it
	err = t.parse()
	if err != nil {
		return nil, err
	}

	return t, nil
}
+
+func (tpl *Template) newContextForExecution(context Context) (*Template, *ExecutionContext, error) {
+ if tpl.Options.TrimBlocks || tpl.Options.LStripBlocks {
+ // Issue #94 https://github.com/flosch/pongo2/issues/94
+ // If an application configures pongo2 template to trim_blocks,
+ // the first newline after a template tag is removed automatically (like in PHP).
+ prev := &Token{
+ Typ: TokenHTML,
+ Val: "\n",
+ }
+
+ for _, t := range tpl.tokens {
+ if tpl.Options.LStripBlocks {
+ if prev.Typ == TokenHTML && t.Typ != TokenHTML && t.Val == "{%" {
+ prev.Val = strings.TrimRight(prev.Val, "\t ")
+ }
+ }
+
+ if tpl.Options.TrimBlocks {
+ if prev.Typ != TokenHTML && t.Typ == TokenHTML && prev.Val == "%}" {
+ if len(t.Val) > 0 && t.Val[0] == '\n' {
+ t.Val = t.Val[1:len(t.Val)]
+ }
+ }
+ }
+
+ prev = t
+ }
+ }
+
+ // Determine the parent to be executed (for template inheritance)
+ parent := tpl
+ for parent.parent != nil {
+ parent = parent.parent
+ }
+
+ // Create context if none is given
+ newContext := make(Context)
+ newContext.Update(tpl.set.Globals)
+
+ if context != nil {
+ newContext.Update(context)
+
+ if len(newContext) > 0 {
+ // Check for context name syntax
+ err := newContext.checkForValidIdentifiers()
+ if err != nil {
+ return parent, nil, err
+ }
+
+ // Check for clashes with macro names
+ for k := range newContext {
+ _, has := tpl.exportedMacros[k]
+ if has {
+ return parent, nil, &Error{
+ Filename: tpl.name,
+ Sender: "execution",
+ OrigError: errors.Errorf("context key name '%s' clashes with macro '%s'", k, k),
+ }
+ }
+ }
+ }
+ }
+
+ // Create operational context
+ ctx := newExecutionContext(parent, newContext)
+
+ return parent, ctx, nil
+}
+
// execute runs the template's inheritance root against the given context,
// writing all output to writer.
func (tpl *Template) execute(context Context, writer TemplateWriter) error {
	parent, ctx, err := tpl.newContextForExecution(context)
	if err != nil {
		return err
	}

	// Run the selected document
	// NOTE: keep the explicit nil-check rather than returning the call result
	// directly — Execute appears to return a concrete *Error, and a direct
	// return would hand callers a non-nil error interface wrapping a nil
	// pointer (the typed-nil pitfall).
	if err := parent.root.Execute(ctx, writer); err != nil {
		return err
	}

	return nil
}
+
// newTemplateWriterAndExecute wraps the plain io.Writer in the templateWriter
// adapter and executes the template against it without intermediate buffering.
func (tpl *Template) newTemplateWriterAndExecute(context Context, writer io.Writer) error {
	return tpl.execute(context, &templateWriter{w: writer})
}
+
// newBufferAndExecute renders the template into a fresh in-memory buffer and
// returns it; nothing is flushed anywhere on error.
func (tpl *Template) newBufferAndExecute(context Context) (*bytes.Buffer, error) {
	// Create output buffer
	// We assume that the rendered template will be 30% larger
	buffer := bytes.NewBuffer(make([]byte, 0, int(float64(tpl.size)*1.3)))
	if err := tpl.execute(context, buffer); err != nil {
		return nil, err
	}
	return buffer, nil
}
+
+// Executes the template with the given context and writes to writer (io.Writer)
+// on success. Context can be nil. Nothing is written on error; instead the error
+// is being returned.
+func (tpl *Template) ExecuteWriter(context Context, writer io.Writer) error {
+ buf, err := tpl.newBufferAndExecute(context)
+ if err != nil {
+ return err
+ }
+ _, err = buf.WriteTo(writer)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
// ExecuteWriterUnbuffered works like ExecuteWriter. The only difference
// between both functions is that this function might already have written
// parts of the generated template in the case of an execution error because
// there's no intermediate buffer involved for performance reasons. This is
// handy if you need high performance template generation or if you want to
// manage your own pool of buffers.
func (tpl *Template) ExecuteWriterUnbuffered(context Context, writer io.Writer) error {
	return tpl.newTemplateWriterAndExecute(context, writer)
}
+
// ExecuteBytes executes the template and returns the rendered template as a []byte.
func (tpl *Template) ExecuteBytes(context Context) ([]byte, error) {
	// Execute template
	buffer, err := tpl.newBufferAndExecute(context)
	if err != nil {
		return nil, err
	}
	return buffer.Bytes(), nil
}
+
// Execute executes the template and returns the rendered template as a string.
func (tpl *Template) Execute(context Context) (string, error) {
	// Execute template
	buffer, err := tpl.newBufferAndExecute(context)
	if err != nil {
		return "", err
	}

	return buffer.String(), nil

}
+
// ExecuteBlocks renders only the named blocks and returns them keyed by block
// name. The inheritance chain is walked child-first, so the most-derived
// definition of each block wins; blocks not found anywhere are simply absent
// from the result.
func (tpl *Template) ExecuteBlocks(context Context, blocks []string) (map[string]string, error) {
	var parents []*Template
	result := make(map[string]string)

	// Collect the chain starting at this (most derived) template.
	parent := tpl
	for parent != nil {
		parents = append(parents, parent)
		parent = parent.parent
	}

	for _, t := range parents {
		buffer := bytes.NewBuffer(make([]byte, 0, int(float64(t.size)*1.3)))
		_, ctx, err := t.newContextForExecution(context)
		if err != nil {
			return nil, err
		}
		for _, blockName := range blocks {
			// Keep the first (most-derived) rendering of each block.
			if _, ok := result[blockName]; ok {
				continue
			}
			if blockWrapper, ok := t.blocks[blockName]; ok {
				bErr := blockWrapper.Execute(ctx, buffer)
				if bErr != nil {
					return nil, bErr
				}
				result[blockName] = buffer.String()
				buffer.Reset() // buffer is reused for the next block
			}
		}
		// We have found all blocks
		if len(blocks) == len(result) {
			break
		}
	}

	return result, nil
}
diff --git a/vendor/github.com/flosch/pongo2/template_loader.go b/vendor/github.com/flosch/pongo2/template_loader.go
new file mode 100644
index 0000000..bc80f4a
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/template_loader.go
@@ -0,0 +1,157 @@
+package pongo2
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+
+ "github.com/juju/errors"
+)
+
// LocalFilesystemLoader represents a local filesystem loader with basic
// BaseDirectory capabilities. The access to the local filesystem is unrestricted.
// The zero value resolves paths relative to the including template or the
// current working directory (see Abs).
type LocalFilesystemLoader struct {
	baseDir string
}
+
// MustNewLocalFileSystemLoader creates a new LocalFilesystemLoader instance
// and panics if there's any error during instantiation. The parameters
// are the same like NewLocalFileSystemLoader.
// It uses log.Panic, so the error is logged before panicking.
func MustNewLocalFileSystemLoader(baseDir string) *LocalFilesystemLoader {
	fs, err := NewLocalFileSystemLoader(baseDir)
	if err != nil {
		log.Panic(err)
	}
	return fs
}
+
+// NewLocalFileSystemLoader creates a new LocalFilesystemLoader and allows
+// templatesto be loaded from disk (unrestricted). If any base directory
+// is given (or being set using SetBaseDir), this base directory is being used
+// for path calculation in template inclusions/imports. Otherwise the path
+// is calculated based relatively to the including template's path.
+func NewLocalFileSystemLoader(baseDir string) (*LocalFilesystemLoader, error) {
+ fs := &LocalFilesystemLoader{}
+ if baseDir != "" {
+ if err := fs.SetBaseDir(baseDir); err != nil {
+ return nil, err
+ }
+ }
+ return fs, nil
+}
+
// SetBaseDir sets the template's base directory. This directory will
// be used for any relative path in filters, tags and From*-functions to determine
// your template. See the comment for NewLocalFileSystemLoader as well.
// The path is made absolute and must name an existing directory.
func (fs *LocalFilesystemLoader) SetBaseDir(path string) error {
	// Make the path absolute
	if !filepath.IsAbs(path) {
		abs, err := filepath.Abs(path)
		if err != nil {
			return err
		}
		path = abs
	}

	// Check for existence
	fi, err := os.Stat(path)
	if err != nil {
		return err
	}
	if !fi.IsDir() {
		return errors.Errorf("The given path '%s' is not a directory.", path)
	}

	fs.baseDir = path
	return nil
}
+
// Get reads the path's content from your local filesystem.
// The whole file is read into memory; the returned reader is backed by that
// buffer, not by an open file handle.
func (fs *LocalFilesystemLoader) Get(path string) (io.Reader, error) {
	buf, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	return bytes.NewReader(buf), nil
}
+
// Abs resolves a filename relative to the base directory. Absolute paths are allowed.
// When there's no base dir set, the absolute path to the filename
// will be calculated based on either the provided base directory (which
// might be a path of a template which includes another template) or
// the current working directory.
// Note: it panics if the working directory cannot be determined.
func (fs *LocalFilesystemLoader) Abs(base, name string) string {
	if filepath.IsAbs(name) {
		return name
	}

	// Our own base dir has always priority; if there's none
	// we use the path provided in base.
	var err error
	if fs.baseDir == "" {
		if base == "" {
			base, err = os.Getwd()
			if err != nil {
				panic(err)
			}
			return filepath.Join(base, name)
		}

		// base is a template path, so resolve relative to its directory.
		return filepath.Join(filepath.Dir(base), name)
	}

	return filepath.Join(fs.baseDir, name)
}
+
// SandboxedFilesystemLoader is still WIP.
// It currently behaves exactly like the embedded LocalFilesystemLoader; the
// sandboxing logic is not implemented yet (see the commented-out code below).
type SandboxedFilesystemLoader struct {
	*LocalFilesystemLoader
}
+
// NewSandboxedFilesystemLoader creates a new sandboxed local file system instance.
// It simply wraps a LocalFilesystemLoader for now (no restrictions applied).
func NewSandboxedFilesystemLoader(baseDir string) (*SandboxedFilesystemLoader, error) {
	fs, err := NewLocalFileSystemLoader(baseDir)
	if err != nil {
		return nil, err
	}
	return &SandboxedFilesystemLoader{
		LocalFilesystemLoader: fs,
	}, nil
}
+
+// Move sandbox to a virtual fs
+
+/*
+if len(set.SandboxDirectories) > 0 {
+ defer func() {
+ // Remove any ".." or other crap
+ resolvedPath = filepath.Clean(resolvedPath)
+
+ // Make the path absolute
+ absPath, err := filepath.Abs(resolvedPath)
+ if err != nil {
+ panic(err)
+ }
+ resolvedPath = absPath
+
+ // Check against the sandbox directories (once one pattern matches, we're done and can allow it)
+ for _, pattern := range set.SandboxDirectories {
+ matched, err := filepath.Match(pattern, resolvedPath)
+ if err != nil {
+ panic("Wrong sandbox directory match pattern (see http://golang.org/pkg/path/filepath/#Match).")
+ }
+ if matched {
+ // OK!
+ return
+ }
+ }
+
+ // No pattern matched, we have to log+deny the request
+ set.logf("Access attempt outside of the sandbox directories (blocked): '%s'", resolvedPath)
+ resolvedPath = ""
+ }()
+}
+*/
diff --git a/vendor/github.com/flosch/pongo2/template_sets.go b/vendor/github.com/flosch/pongo2/template_sets.go
new file mode 100644
index 0000000..78b3c8d
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/template_sets.go
@@ -0,0 +1,305 @@
+package pongo2
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "sync"
+
+ "github.com/juju/errors"
+)
+
// TemplateLoader allows to implement a virtual file system.
type TemplateLoader interface {
	// Abs calculates the path to a given template. Whenever a path must be resolved
	// due to an import from another template, the base equals the parent template's path.
	Abs(base, name string) string

	// Get returns an io.Reader where the template's content can be read from.
	// Implementations should return an error for non-existent paths so the
	// set can fall through to the next loader (see resolveTemplate).
	Get(path string) (io.Reader, error)
}
+
// TemplateSet allows you to create your own group of templates with their own
// global context (which is shared among all members of the set) and their own
// configuration.
// It's useful for a separation of different kind of templates
// (e. g. web templates vs. mail templates).
type TemplateSet struct {
	name    string           // used as the prefix in debug log output (see logf)
	loaders []TemplateLoader // tried in order by resolveTemplate

	// Globals will be provided to all templates created within this template set
	Globals Context

	// If debug is true (default false), ExecutionContext.Logf() will work and output
	// to STDOUT. Furthermore, FromCache() won't cache the templates.
	// Make sure to synchronize the access to it in case you're changing this
	// variable during program execution (and template compilation/execution).
	Debug bool

	// Options allow you to change the behavior of template-engine.
	// You can change the options before calling the Execute method.
	Options *Options

	// Sandbox features
	// - Disallow access to specific tags and/or filters (using BanTag() and BanFilter())
	//
	// For efficiency reasons you can ban tags/filters only *before* you have
	// added your first template to the set (restrictions are statically checked).
	// After you added one, it's not possible anymore (for your personal security).
	firstTemplateCreated bool
	bannedTags           map[string]bool
	bannedFilters        map[string]bool

	// Template cache (for FromCache())
	templateCache      map[string]*Template
	templateCacheMutex sync.Mutex
}
+
// NewSet can be used to create sets with different kind of templates
// (e. g. web from mail templates), with different globals or
// other configurations.
// It panics when no loader is given.
func NewSet(name string, loaders ...TemplateLoader) *TemplateSet {
	if len(loaders) == 0 {
		panic(fmt.Errorf("at least one template loader must be specified"))
	}

	return &TemplateSet{
		name:          name,
		loaders:       loaders,
		Globals:       make(Context),
		bannedTags:    make(map[string]bool),
		bannedFilters: make(map[string]bool),
		templateCache: make(map[string]*Template),
		Options:       newOptions(),
	}
}
+
// AddLoader appends additional loaders to the set's lookup chain; they are
// tried in order by resolveTemplate after the existing ones.
func (set *TemplateSet) AddLoader(loaders ...TemplateLoader) {
	set.loaders = append(set.loaders, loaders...)
}
+
// resolveFilename resolves path using the set's primary (first) loader only.
func (set *TemplateSet) resolveFilename(tpl *Template, path string) string {
	return set.resolveFilenameForLoader(set.loaders[0], tpl, path)
}
+
+func (set *TemplateSet) resolveFilenameForLoader(loader TemplateLoader, tpl *Template, path string) string {
+ name := ""
+ if tpl != nil && tpl.isTplString {
+ return path
+ }
+ if tpl != nil {
+ name = tpl.name
+ }
+
+ return loader.Abs(name, path)
+}
+
// BanTag bans a specific tag for this template set. See more in the documentation for TemplateSet.
// Banning is only allowed before the first template has been added to the set.
func (set *TemplateSet) BanTag(name string) error {
	_, has := tags[name]
	if !has {
		return errors.Errorf("tag '%s' not found", name)
	}
	if set.firstTemplateCreated {
		return errors.New("you cannot ban any tags after you've added your first template to your template set")
	}
	_, has = set.bannedTags[name]
	if has {
		return errors.Errorf("tag '%s' is already banned", name)
	}
	set.bannedTags[name] = true

	return nil
}
+
// BanFilter bans a specific filter for this template set. See more in the documentation for TemplateSet.
// Banning is only allowed before the first template has been added to the set.
func (set *TemplateSet) BanFilter(name string) error {
	_, has := filters[name]
	if !has {
		return errors.Errorf("filter '%s' not found", name)
	}
	if set.firstTemplateCreated {
		return errors.New("you cannot ban any filters after you've added your first template to your template set")
	}
	_, has = set.bannedFilters[name]
	if has {
		return errors.Errorf("filter '%s' is already banned", name)
	}
	set.bannedFilters[name] = true

	return nil
}
+
// resolveTemplate tries every registered loader in order and returns the
// resolved name, the loader and an open reader for the first one that
// succeeds; it fails only when all loaders fail.
func (set *TemplateSet) resolveTemplate(tpl *Template, path string) (name string, loader TemplateLoader, fd io.Reader, err error) {
	// iterate over loaders until we appear to have a valid template
	for _, loader = range set.loaders {
		name = set.resolveFilenameForLoader(loader, tpl, path)
		fd, err = loader.Get(name)
		if err == nil {
			return
		}
	}

	return path, nil, nil, fmt.Errorf("unable to resolve template")
}
+
// CleanCache cleans the template cache. If filenames is not empty,
// it will remove the template caches of those filenames.
// Or it will empty the whole template cache. It is thread-safe.
func (set *TemplateSet) CleanCache(filenames ...string) {
	set.templateCacheMutex.Lock()
	defer set.templateCacheMutex.Unlock()

	if len(filenames) == 0 {
		// Replace the whole map, pre-sized like the old one since the cache
		// will likely be refilled to a similar size.
		set.templateCache = make(map[string]*Template, len(set.templateCache))
	}

	for _, filename := range filenames {
		delete(set.templateCache, set.resolveFilename(nil, filename))
	}
}
+
// FromCache is a convenient method to cache templates. It is thread-safe
// and will only compile the template associated with a filename once.
// If TemplateSet.Debug is true (for example during development phase),
// FromCache() will not cache the template and instead recompile it on any
// call (to make changes to a template live instantaneously).
func (set *TemplateSet) FromCache(filename string) (*Template, error) {
	if set.Debug {
		// Recompile on any request
		return set.FromFile(filename)
	}
	// Cache the template
	cleanedFilename := set.resolveFilename(nil, filename)

	set.templateCacheMutex.Lock()
	defer set.templateCacheMutex.Unlock()

	tpl, has := set.templateCache[cleanedFilename]

	// Cache miss
	if !has {
		// This tpl deliberately shadows the outer one; both paths return
		// directly, so the shadowing is harmless.
		tpl, err := set.FromFile(cleanedFilename)
		if err != nil {
			return nil, err
		}
		set.templateCache[cleanedFilename] = tpl
		return tpl, nil
	}

	// Cache hit
	return tpl, nil
}
+
// FromString loads a template from string and returns a Template instance.
// It also freezes the set's tag/filter bans (firstTemplateCreated).
func (set *TemplateSet) FromString(tpl string) (*Template, error) {
	set.firstTemplateCreated = true

	return newTemplateString(set, []byte(tpl))
}
+
// FromBytes loads a template from bytes and returns a Template instance.
// It also freezes the set's tag/filter bans (firstTemplateCreated).
func (set *TemplateSet) FromBytes(tpl []byte) (*Template, error) {
	set.firstTemplateCreated = true

	return newTemplateString(set, tpl)
}
+
// FromFile loads a template from a filename and returns a Template instance.
// The template is compiled under the original filename (not the
// loader-resolved one); errors are wrapped with the "fromfile" sender.
func (set *TemplateSet) FromFile(filename string) (*Template, error) {
	set.firstTemplateCreated = true

	_, _, fd, err := set.resolveTemplate(nil, filename)
	if err != nil {
		return nil, &Error{
			Filename:  filename,
			Sender:    "fromfile",
			OrigError: err,
		}
	}
	buf, err := ioutil.ReadAll(fd)
	if err != nil {
		return nil, &Error{
			Filename:  filename,
			Sender:    "fromfile",
			OrigError: err,
		}
	}

	return newTemplate(set, filename, false, buf)
}
+
+// RenderTemplateString is a shortcut and renders a template string directly.
+func (set *TemplateSet) RenderTemplateString(s string, ctx Context) (string, error) {
+ set.firstTemplateCreated = true
+
+ tpl := Must(set.FromString(s))
+ result, err := tpl.Execute(ctx)
+ if err != nil {
+ return "", err
+ }
+ return result, nil
+}
+
+// RenderTemplateBytes is a shortcut and renders template bytes directly.
+func (set *TemplateSet) RenderTemplateBytes(b []byte, ctx Context) (string, error) {
+ set.firstTemplateCreated = true
+
+ tpl := Must(set.FromBytes(b))
+ result, err := tpl.Execute(ctx)
+ if err != nil {
+ return "", err
+ }
+ return result, nil
+}
+
+// RenderTemplateFile is a shortcut and renders a template file directly.
+func (set *TemplateSet) RenderTemplateFile(fn string, ctx Context) (string, error) {
+ set.firstTemplateCreated = true
+
+ tpl := Must(set.FromFile(fn))
+ result, err := tpl.Execute(ctx)
+ if err != nil {
+ return "", err
+ }
+ return result, nil
+}
+
// logf prints a debug message prefixed with the set's name; it is a no-op
// unless set.Debug is enabled.
func (set *TemplateSet) logf(format string, args ...interface{}) {
	if set.Debug {
		logger.Printf(fmt.Sprintf("[template set: %s] %s", set.name, format), args...)
	}
}
+
// Logging function (internally used); gated on the package-level debug flag.
func logf(format string, items ...interface{}) {
	if debug {
		logger.Printf(format, items...)
	}
}
+
var (
	debug  bool // internal debugging
	logger = log.New(os.Stdout, "[pongo2] ", log.LstdFlags|log.Lshortfile)

	// DefaultLoader allows the default un-sandboxed access to the local file
	// system and is being used by the DefaultSet.
	DefaultLoader = MustNewLocalFileSystemLoader("")

	// DefaultSet is a set created for you for convenience reasons.
	DefaultSet = NewSet("default", DefaultLoader)

	// Methods on the default set
	// NOTE(review): RenderTemplateBytes has no package-level alias here even
	// though FromBytes does — possibly an oversight; confirm upstream.
	FromString           = DefaultSet.FromString
	FromBytes            = DefaultSet.FromBytes
	FromFile             = DefaultSet.FromFile
	FromCache            = DefaultSet.FromCache
	RenderTemplateString = DefaultSet.RenderTemplateString
	RenderTemplateFile   = DefaultSet.RenderTemplateFile

	// Globals for the default set
	Globals = DefaultSet.Globals
)
diff --git a/vendor/github.com/flosch/pongo2/value.go b/vendor/github.com/flosch/pongo2/value.go
new file mode 100644
index 0000000..df70bbc
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/value.go
@@ -0,0 +1,520 @@
+package pongo2
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+type Value struct {
+ val reflect.Value
+ safe bool // used to indicate whether a Value needs explicit escaping in the template
+}
+
+// AsValue converts any given value to a pongo2.Value
+// Usually being used within own functions passed to a template
+// through a Context or within filter functions.
+//
+// Example:
+// AsValue("my string")
+func AsValue(i interface{}) *Value {
+ return &Value{
+ val: reflect.ValueOf(i),
+ }
+}
+
+// AsSafeValue works like AsValue, but does not apply the 'escape' filter.
+func AsSafeValue(i interface{}) *Value {
+ return &Value{
+ val: reflect.ValueOf(i),
+ safe: true,
+ }
+}
+
+func (v *Value) getResolvedValue() reflect.Value {
+ if v.val.IsValid() && v.val.Kind() == reflect.Ptr {
+ return v.val.Elem()
+ }
+ return v.val
+}
+
+// IsString checks whether the underlying value is a string
+func (v *Value) IsString() bool {
+ return v.getResolvedValue().Kind() == reflect.String
+}
+
+// IsBool checks whether the underlying value is a bool
+func (v *Value) IsBool() bool {
+ return v.getResolvedValue().Kind() == reflect.Bool
+}
+
+// IsFloat checks whether the underlying value is a float
+func (v *Value) IsFloat() bool {
+ return v.getResolvedValue().Kind() == reflect.Float32 ||
+ v.getResolvedValue().Kind() == reflect.Float64
+}
+
+// IsInteger checks whether the underlying value is an integer
+func (v *Value) IsInteger() bool {
+ return v.getResolvedValue().Kind() == reflect.Int ||
+ v.getResolvedValue().Kind() == reflect.Int8 ||
+ v.getResolvedValue().Kind() == reflect.Int16 ||
+ v.getResolvedValue().Kind() == reflect.Int32 ||
+ v.getResolvedValue().Kind() == reflect.Int64 ||
+ v.getResolvedValue().Kind() == reflect.Uint ||
+ v.getResolvedValue().Kind() == reflect.Uint8 ||
+ v.getResolvedValue().Kind() == reflect.Uint16 ||
+ v.getResolvedValue().Kind() == reflect.Uint32 ||
+ v.getResolvedValue().Kind() == reflect.Uint64
+}
+
+// IsNumber checks whether the underlying value is either an integer
+// or a float.
+func (v *Value) IsNumber() bool {
+ return v.IsInteger() || v.IsFloat()
+}
+
+// IsNil checks whether the underlying value is NIL
+func (v *Value) IsNil() bool {
+ //fmt.Printf("%+v\n", v.getResolvedValue().Type().String())
+ return !v.getResolvedValue().IsValid()
+}
+
+// String returns a string for the underlying value. If this value is not
+// of type string, pongo2 tries to convert it. Currently the following
+// types for underlying values are supported:
+//
+// 1. string
+// 2. int/uint (any size)
+// 3. float (any precision)
+// 4. bool
+// 5. time.Time
+// 6. String() will be called on the underlying value if provided
+//
+// NIL values will lead to an empty string. Unsupported types will lead
+// to their respective type name.
+func (v *Value) String() string {
+ if v.IsNil() {
+ return ""
+ }
+
+ switch v.getResolvedValue().Kind() {
+ case reflect.String:
+ return v.getResolvedValue().String()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.FormatInt(v.getResolvedValue().Int(), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return strconv.FormatUint(v.getResolvedValue().Uint(), 10)
+ case reflect.Float32, reflect.Float64:
+ return fmt.Sprintf("%f", v.getResolvedValue().Float())
+ case reflect.Bool:
+ if v.Bool() {
+ return "True"
+ }
+ return "False"
+ case reflect.Struct:
+ if t, ok := v.Interface().(fmt.Stringer); ok {
+ return t.String()
+ }
+ }
+
+ logf("Value.String() not implemented for type: %s\n", v.getResolvedValue().Kind().String())
+ return v.getResolvedValue().String()
+}
+
+// Integer returns the underlying value as an integer (converts the underlying
+// value, if necessary). If it's not possible to convert the underlying value,
+// it will return 0.
+func (v *Value) Integer() int {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return int(v.getResolvedValue().Int())
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return int(v.getResolvedValue().Uint())
+ case reflect.Float32, reflect.Float64:
+ return int(v.getResolvedValue().Float())
+ case reflect.String:
+ // Try to convert from string to int (base 10)
+ f, err := strconv.ParseFloat(v.getResolvedValue().String(), 64)
+ if err != nil {
+ return 0
+ }
+ return int(f)
+ default:
+ logf("Value.Integer() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return 0
+ }
+}
+
+// Float returns the underlying value as a float (converts the underlying
+// value, if necessary). If it's not possible to convert the underlying value,
+// it will return 0.0.
+func (v *Value) Float() float64 {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.getResolvedValue().Int())
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return float64(v.getResolvedValue().Uint())
+ case reflect.Float32, reflect.Float64:
+ return v.getResolvedValue().Float()
+ case reflect.String:
+ // Try to convert from string to float64 (base 10)
+ f, err := strconv.ParseFloat(v.getResolvedValue().String(), 64)
+ if err != nil {
+ return 0.0
+ }
+ return f
+ default:
+ logf("Value.Float() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return 0.0
+ }
+}
+
+// Bool returns the underlying value as bool. If the value is not bool, false
+// will always be returned. If you're looking for true/false-evaluation of the
+// underlying value, have a look on the IsTrue()-function.
+func (v *Value) Bool() bool {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Bool:
+ return v.getResolvedValue().Bool()
+ default:
+ logf("Value.Bool() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return false
+ }
+}
+
+// IsTrue tries to evaluate the underlying value the Pythonic-way:
+//
+// Returns TRUE in one of the following cases:
+//
+// * int != 0
+// * uint != 0
+// * float != 0.0
+// * len(array/chan/map/slice/string) > 0
+// * bool == true
+// * underlying value is a struct
+//
+// Otherwise returns always FALSE.
+func (v *Value) IsTrue() bool {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.getResolvedValue().Int() != 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return v.getResolvedValue().Uint() != 0
+ case reflect.Float32, reflect.Float64:
+ return v.getResolvedValue().Float() != 0
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
+ return v.getResolvedValue().Len() > 0
+ case reflect.Bool:
+ return v.getResolvedValue().Bool()
+ case reflect.Struct:
+ return true // struct instance is always true
+ default:
+ logf("Value.IsTrue() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return false
+ }
+}
+
+// Negate tries to negate the underlying value. It's mainly used for
+// the NOT-operator and in conjunction with a call to
+// return_value.IsTrue() afterwards.
+//
+// Example:
+// AsValue(1).Negate().IsTrue() == false
+func (v *Value) Negate() *Value {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ if v.Integer() != 0 {
+ return AsValue(0)
+ }
+ return AsValue(1)
+ case reflect.Float32, reflect.Float64:
+ if v.Float() != 0.0 {
+ return AsValue(float64(0.0))
+ }
+ return AsValue(float64(1.1))
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
+ return AsValue(v.getResolvedValue().Len() == 0)
+ case reflect.Bool:
+ return AsValue(!v.getResolvedValue().Bool())
+ case reflect.Struct:
+ return AsValue(false)
+ default:
+ logf("Value.IsTrue() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return AsValue(true)
+ }
+}
+
+// Len returns the length for an array, chan, map, slice or string.
+// Otherwise it will return 0.
+func (v *Value) Len() int {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ return v.getResolvedValue().Len()
+ case reflect.String:
+ runes := []rune(v.getResolvedValue().String())
+ return len(runes)
+ default:
+ logf("Value.Len() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return 0
+ }
+}
+
+// Slice slices an array, slice or string. Otherwise it will
+// return an empty []int.
+func (v *Value) Slice(i, j int) *Value {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Array, reflect.Slice:
+ return AsValue(v.getResolvedValue().Slice(i, j).Interface())
+ case reflect.String:
+ runes := []rune(v.getResolvedValue().String())
+ return AsValue(string(runes[i:j]))
+ default:
+ logf("Value.Slice() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return AsValue([]int{})
+ }
+}
+
+// Index gets the i-th item of an array, slice or string. Otherwise
+// it will return NIL.
+func (v *Value) Index(i int) *Value {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Array, reflect.Slice:
+ if i >= v.Len() {
+ return AsValue(nil)
+ }
+ return AsValue(v.getResolvedValue().Index(i).Interface())
+ case reflect.String:
+ //return AsValue(v.getResolvedValue().Slice(i, i+1).Interface())
+ s := v.getResolvedValue().String()
+ runes := []rune(s)
+ if i < len(runes) {
+ return AsValue(string(runes[i]))
+ }
+ return AsValue("")
+ default:
+ logf("Value.Slice() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return AsValue([]int{})
+ }
+}
+
+// Contains checks whether the underlying value (which must be of type struct, map,
+// string, array or slice) contains another Value (e. g. used to check
+// whether a struct contains a specific field or a map contains a specific key).
+//
+// Example:
+// AsValue("Hello, World!").Contains(AsValue("World")) == true
+func (v *Value) Contains(other *Value) bool {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Struct:
+ fieldValue := v.getResolvedValue().FieldByName(other.String())
+ return fieldValue.IsValid()
+ case reflect.Map:
+ var mapValue reflect.Value
+ switch other.Interface().(type) {
+ case int:
+ mapValue = v.getResolvedValue().MapIndex(other.getResolvedValue())
+ case string:
+ mapValue = v.getResolvedValue().MapIndex(other.getResolvedValue())
+ default:
+ logf("Value.Contains() does not support lookup type '%s'\n", other.getResolvedValue().Kind().String())
+ return false
+ }
+
+ return mapValue.IsValid()
+ case reflect.String:
+ return strings.Contains(v.getResolvedValue().String(), other.String())
+
+ case reflect.Slice, reflect.Array:
+ for i := 0; i < v.getResolvedValue().Len(); i++ {
+ item := v.getResolvedValue().Index(i)
+ if other.Interface() == item.Interface() {
+ return true
+ }
+ }
+ return false
+
+ default:
+ logf("Value.Contains() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return false
+ }
+}
+
+// CanSlice checks whether the underlying value is of type array, slice or string.
+// You normally would use CanSlice() before using the Slice() operation.
+func (v *Value) CanSlice() bool {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Array, reflect.Slice, reflect.String:
+ return true
+ }
+ return false
+}
+
+// Iterate iterates over a map, array, slice or a string. It calls the
+// function's first argument for every value with the following arguments:
+//
+// idx current 0-index
+// count total amount of items
+// key *Value for the key or item
+// value *Value (only for maps, the respective value for a specific key)
+//
+// If the underlying value has no items or is not one of the types above,
+// the empty function (function's second argument) will be called.
+func (v *Value) Iterate(fn func(idx, count int, key, value *Value) bool, empty func()) {
+ v.IterateOrder(fn, empty, false, false)
+}
+
+// IterateOrder behaves like Value.Iterate, but can iterate through an array/slice/string in reverse. Does
+// not affect the iteration through a map because maps don't have any particular order.
+// However, you can force an order using the `sorted` keyword (and even use `reversed sorted`).
+func (v *Value) IterateOrder(fn func(idx, count int, key, value *Value) bool, empty func(), reverse bool, sorted bool) {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Map:
+ keys := sortedKeys(v.getResolvedValue().MapKeys())
+ if sorted {
+ if reverse {
+ sort.Sort(sort.Reverse(keys))
+ } else {
+ sort.Sort(keys)
+ }
+ }
+ keyLen := len(keys)
+ for idx, key := range keys {
+ value := v.getResolvedValue().MapIndex(key)
+ if !fn(idx, keyLen, &Value{val: key}, &Value{val: value}) {
+ return
+ }
+ }
+ if keyLen == 0 {
+ empty()
+ }
+ return // done
+ case reflect.Array, reflect.Slice:
+ var items valuesList
+
+ itemCount := v.getResolvedValue().Len()
+ for i := 0; i < itemCount; i++ {
+ items = append(items, &Value{val: v.getResolvedValue().Index(i)})
+ }
+
+ if sorted {
+ if reverse {
+ sort.Sort(sort.Reverse(items))
+ } else {
+ sort.Sort(items)
+ }
+ } else {
+ if reverse {
+ for i := 0; i < itemCount/2; i++ {
+ items[i], items[itemCount-1-i] = items[itemCount-1-i], items[i]
+ }
+ }
+ }
+
+ if len(items) > 0 {
+ for idx, item := range items {
+ if !fn(idx, itemCount, item, nil) {
+ return
+ }
+ }
+ } else {
+ empty()
+ }
+ return // done
+ case reflect.String:
+ if sorted {
+ // TODO(flosch): Handle sorted
+ panic("TODO: handle sort for type string")
+ }
+
+ // TODO(flosch): Not utf8-compatible (utf8-decoding necessary)
+ charCount := v.getResolvedValue().Len()
+ if charCount > 0 {
+ if reverse {
+ for i := charCount - 1; i >= 0; i-- {
+ if !fn(i, charCount, &Value{val: v.getResolvedValue().Slice(i, i+1)}, nil) {
+ return
+ }
+ }
+ } else {
+ for i := 0; i < charCount; i++ {
+ if !fn(i, charCount, &Value{val: v.getResolvedValue().Slice(i, i+1)}, nil) {
+ return
+ }
+ }
+ }
+ } else {
+ empty()
+ }
+ return // done
+ default:
+ logf("Value.Iterate() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ }
+ empty()
+}
+
+// Interface gives you access to the underlying value.
+func (v *Value) Interface() interface{} {
+ if v.val.IsValid() {
+ return v.val.Interface()
+ }
+ return nil
+}
+
+// EqualValueTo checks whether two values are containing the same value or object.
+func (v *Value) EqualValueTo(other *Value) bool {
+ // comparison of uint with int fails using .Interface()-comparison (see issue #64)
+ if v.IsInteger() && other.IsInteger() {
+ return v.Integer() == other.Integer()
+ }
+ return v.Interface() == other.Interface()
+}
+
+type sortedKeys []reflect.Value
+
+func (sk sortedKeys) Len() int {
+ return len(sk)
+}
+
+func (sk sortedKeys) Less(i, j int) bool {
+ vi := &Value{val: sk[i]}
+ vj := &Value{val: sk[j]}
+ switch {
+ case vi.IsInteger() && vj.IsInteger():
+ return vi.Integer() < vj.Integer()
+ case vi.IsFloat() && vj.IsFloat():
+ return vi.Float() < vj.Float()
+ default:
+ return vi.String() < vj.String()
+ }
+}
+
+func (sk sortedKeys) Swap(i, j int) {
+ sk[i], sk[j] = sk[j], sk[i]
+}
+
+type valuesList []*Value
+
+func (vl valuesList) Len() int {
+ return len(vl)
+}
+
+func (vl valuesList) Less(i, j int) bool {
+ vi := vl[i]
+ vj := vl[j]
+ switch {
+ case vi.IsInteger() && vj.IsInteger():
+ return vi.Integer() < vj.Integer()
+ case vi.IsFloat() && vj.IsFloat():
+ return vi.Float() < vj.Float()
+ default:
+ return vi.String() < vj.String()
+ }
+}
+
+func (vl valuesList) Swap(i, j int) {
+ vl[i], vl[j] = vl[j], vl[i]
+}
diff --git a/vendor/github.com/flosch/pongo2/variable.go b/vendor/github.com/flosch/pongo2/variable.go
new file mode 100644
index 0000000..a506e37
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/variable.go
@@ -0,0 +1,695 @@
+package pongo2
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/juju/errors"
+)
+
+const (
+ varTypeInt = iota
+ varTypeIdent
+)
+
+var (
+ typeOfValuePtr = reflect.TypeOf(new(Value))
+ typeOfExecCtxPtr = reflect.TypeOf(new(ExecutionContext))
+)
+
+type variablePart struct {
+ typ int
+ s string
+ i int
+
+ isFunctionCall bool
+ callingArgs []functionCallArgument // needed for a function call, represents all argument nodes (INode supports nested function calls)
+}
+
+type functionCallArgument interface {
+ Evaluate(*ExecutionContext) (*Value, *Error)
+}
+
+// TODO: Add location tokens
+type stringResolver struct {
+ locationToken *Token
+ val string
+}
+
+type intResolver struct {
+ locationToken *Token
+ val int
+}
+
+type floatResolver struct {
+ locationToken *Token
+ val float64
+}
+
+type boolResolver struct {
+ locationToken *Token
+ val bool
+}
+
+type variableResolver struct {
+ locationToken *Token
+
+ parts []*variablePart
+}
+
+type nodeFilteredVariable struct {
+ locationToken *Token
+
+ resolver IEvaluator
+ filterChain []*filterCall
+}
+
+type nodeVariable struct {
+ locationToken *Token
+ expr IEvaluator
+}
+
+type executionCtxEval struct{}
+
+func (v *nodeFilteredVariable) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := v.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (vr *variableResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := vr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (s *stringResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := s.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (i *intResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := i.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (f *floatResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := f.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (b *boolResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := b.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (v *nodeFilteredVariable) GetPositionToken() *Token {
+ return v.locationToken
+}
+
+func (vr *variableResolver) GetPositionToken() *Token {
+ return vr.locationToken
+}
+
+func (s *stringResolver) GetPositionToken() *Token {
+ return s.locationToken
+}
+
+func (i *intResolver) GetPositionToken() *Token {
+ return i.locationToken
+}
+
+func (f *floatResolver) GetPositionToken() *Token {
+ return f.locationToken
+}
+
+func (b *boolResolver) GetPositionToken() *Token {
+ return b.locationToken
+}
+
+func (s *stringResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ return AsValue(s.val), nil
+}
+
+func (i *intResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ return AsValue(i.val), nil
+}
+
+func (f *floatResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ return AsValue(f.val), nil
+}
+
+func (b *boolResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ return AsValue(b.val), nil
+}
+
+func (s *stringResolver) FilterApplied(name string) bool {
+ return false
+}
+
+func (i *intResolver) FilterApplied(name string) bool {
+ return false
+}
+
+func (f *floatResolver) FilterApplied(name string) bool {
+ return false
+}
+
+func (b *boolResolver) FilterApplied(name string) bool {
+ return false
+}
+
+func (nv *nodeVariable) FilterApplied(name string) bool {
+ return nv.expr.FilterApplied(name)
+}
+
+func (nv *nodeVariable) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := nv.expr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ if !nv.expr.FilterApplied("safe") && !value.safe && value.IsString() && ctx.Autoescape {
+ // apply escape filter
+ value, err = filters["escape"](value, nil)
+ if err != nil {
+ return err
+ }
+ }
+
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (executionCtxEval) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ return AsValue(ctx), nil
+}
+
+func (vr *variableResolver) FilterApplied(name string) bool {
+ return false
+}
+
+func (vr *variableResolver) String() string {
+ parts := make([]string, 0, len(vr.parts))
+ for _, p := range vr.parts {
+ switch p.typ {
+ case varTypeInt:
+ parts = append(parts, strconv.Itoa(p.i))
+ case varTypeIdent:
+ parts = append(parts, p.s)
+ default:
+ panic("unimplemented")
+ }
+ }
+ return strings.Join(parts, ".")
+}
+
+func (vr *variableResolver) resolve(ctx *ExecutionContext) (*Value, error) {
+ var current reflect.Value
+ var isSafe bool
+
+ for idx, part := range vr.parts {
+ if idx == 0 {
+ // We're looking up the first part of the variable.
+ // First we're having a look in our private
+ // context (e. g. information provided by tags, like the forloop)
+ val, inPrivate := ctx.Private[vr.parts[0].s]
+ if !inPrivate {
+ // Nothing found? Then have a final lookup in the public context
+ val = ctx.Public[vr.parts[0].s]
+ }
+ current = reflect.ValueOf(val) // Get the initial value
+ } else {
+ // Next parts, resolve it from current
+
+ // Before resolving the pointer, let's see if we have a method to call
+ // Problem with resolving the pointer is we're changing the receiver
+ isFunc := false
+ if part.typ == varTypeIdent {
+ funcValue := current.MethodByName(part.s)
+ if funcValue.IsValid() {
+ current = funcValue
+ isFunc = true
+ }
+ }
+
+ if !isFunc {
+ // If current a pointer, resolve it
+ if current.Kind() == reflect.Ptr {
+ current = current.Elem()
+ if !current.IsValid() {
+ // Value is not valid (anymore)
+ return AsValue(nil), nil
+ }
+ }
+
+ // Look up which part must be called now
+ switch part.typ {
+ case varTypeInt:
+ // Calling an index is only possible for:
+ // * slices/arrays/strings
+ switch current.Kind() {
+ case reflect.String, reflect.Array, reflect.Slice:
+ if part.i >= 0 && current.Len() > part.i {
+ current = current.Index(part.i)
+ } else {
+ // In Django, exceeding the length of a list is just empty.
+ return AsValue(nil), nil
+ }
+ default:
+ return nil, errors.Errorf("Can't access an index on type %s (variable %s)",
+ current.Kind().String(), vr.String())
+ }
+ case varTypeIdent:
+ // debugging:
+ // fmt.Printf("now = %s (kind: %s)\n", part.s, current.Kind().String())
+
+ // Calling a field or key
+ switch current.Kind() {
+ case reflect.Struct:
+ current = current.FieldByName(part.s)
+ case reflect.Map:
+ current = current.MapIndex(reflect.ValueOf(part.s))
+ default:
+ return nil, errors.Errorf("Can't access a field by name on type %s (variable %s)",
+ current.Kind().String(), vr.String())
+ }
+ default:
+ panic("unimplemented")
+ }
+ }
+ }
+
+ if !current.IsValid() {
+ // Value is not valid (anymore)
+ return AsValue(nil), nil
+ }
+
+ // If current is a reflect.ValueOf(pongo2.Value), then unpack it
+ // Happens in function calls (as a return value) or by injecting
+ // into the execution context (e.g. in a for-loop)
+ if current.Type() == typeOfValuePtr {
+ tmpValue := current.Interface().(*Value)
+ current = tmpValue.val
+ isSafe = tmpValue.safe
+ }
+
+ // Check whether this is an interface and resolve it where required
+ if current.Kind() == reflect.Interface {
+ current = reflect.ValueOf(current.Interface())
+ }
+
+ // Check if the part is a function call
+ if part.isFunctionCall || current.Kind() == reflect.Func {
+ // Check for callable
+ if current.Kind() != reflect.Func {
+ return nil, errors.Errorf("'%s' is not a function (it is %s)", vr.String(), current.Kind().String())
+ }
+
+ // Check for correct function syntax and types
+ // func(*Value, ...) *Value
+ t := current.Type()
+ currArgs := part.callingArgs
+
+ // If an implicit ExecCtx is needed
+ if t.NumIn() > 0 && t.In(0) == typeOfExecCtxPtr {
+ currArgs = append([]functionCallArgument{executionCtxEval{}}, currArgs...)
+ }
+
+ // Input arguments
+ if len(currArgs) != t.NumIn() && !(len(currArgs) >= t.NumIn()-1 && t.IsVariadic()) {
+ return nil,
+ errors.Errorf("Function input argument count (%d) of '%s' must be equal to the calling argument count (%d).",
+ t.NumIn(), vr.String(), len(currArgs))
+ }
+
+ // Output arguments
+ if t.NumOut() != 1 && t.NumOut() != 2 {
+ return nil, errors.Errorf("'%s' must have exactly 1 or 2 output arguments, the second argument must be of type error", vr.String())
+ }
+
+ // Evaluate all parameters
+ var parameters []reflect.Value
+
+ numArgs := t.NumIn()
+ isVariadic := t.IsVariadic()
+ var fnArg reflect.Type
+
+ for idx, arg := range currArgs {
+ pv, err := arg.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ if isVariadic {
+ if idx >= t.NumIn()-1 {
+ fnArg = t.In(numArgs - 1).Elem()
+ } else {
+ fnArg = t.In(idx)
+ }
+ } else {
+ fnArg = t.In(idx)
+ }
+
+ if fnArg != typeOfValuePtr {
+ // Function's argument is not a *pongo2.Value, then we have to check whether input argument is of the same type as the function's argument
+ if !isVariadic {
+ if fnArg != reflect.TypeOf(pv.Interface()) && fnArg.Kind() != reflect.Interface {
+ return nil, errors.Errorf("Function input argument %d of '%s' must be of type %s or *pongo2.Value (not %T).",
+ idx, vr.String(), fnArg.String(), pv.Interface())
+ }
+ // Function's argument has another type, using the interface-value
+ parameters = append(parameters, reflect.ValueOf(pv.Interface()))
+ } else {
+ if fnArg != reflect.TypeOf(pv.Interface()) && fnArg.Kind() != reflect.Interface {
+ return nil, errors.Errorf("Function variadic input argument of '%s' must be of type %s or *pongo2.Value (not %T).",
+ vr.String(), fnArg.String(), pv.Interface())
+ }
+ // Function's argument has another type, using the interface-value
+ parameters = append(parameters, reflect.ValueOf(pv.Interface()))
+ }
+ } else {
+ // Function's argument is a *pongo2.Value
+ parameters = append(parameters, reflect.ValueOf(pv))
+ }
+ }
+
+ // Check if any of the values are invalid
+ for _, p := range parameters {
+ if p.Kind() == reflect.Invalid {
+ return nil, errors.Errorf("Calling a function using an invalid parameter")
+ }
+ }
+
+ // Call it and get first return parameter back
+ values := current.Call(parameters)
+ rv := values[0]
+ if t.NumOut() == 2 {
+ e := values[1].Interface()
+ if e != nil {
+ err, ok := e.(error)
+ if !ok {
+ return nil, errors.Errorf("The second return value is not an error")
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if rv.Type() != typeOfValuePtr {
+ current = reflect.ValueOf(rv.Interface())
+ } else {
+ // Return the function call value
+ current = rv.Interface().(*Value).val
+ isSafe = rv.Interface().(*Value).safe
+ }
+ }
+
+ if !current.IsValid() {
+ // Value is not valid (e. g. NIL value)
+ return AsValue(nil), nil
+ }
+ }
+
+ return &Value{val: current, safe: isSafe}, nil
+}
+
+func (vr *variableResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ value, err := vr.resolve(ctx)
+ if err != nil {
+ return AsValue(nil), ctx.Error(err.Error(), vr.locationToken)
+ }
+ return value, nil
+}
+
+func (v *nodeFilteredVariable) FilterApplied(name string) bool {
+ for _, filter := range v.filterChain {
+ if filter.name == name {
+ return true
+ }
+ }
+ return false
+}
+
+func (v *nodeFilteredVariable) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ value, err := v.resolver.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, filter := range v.filterChain {
+ value, err = filter.Execute(value, ctx)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return value, nil
+}
+
+// IDENT | IDENT.(IDENT|NUMBER)...
+func (p *Parser) parseVariableOrLiteral() (IEvaluator, *Error) {
+ t := p.Current()
+
+ if t == nil {
+ return nil, p.Error("Unexpected EOF, expected a number, string, keyword or identifier.", p.lastToken)
+ }
+
+ // If the first part is a number or a string, there's nothing to resolve (we only have to return the value then)
+ switch t.Typ {
+ case TokenNumber:
+ p.Consume()
+
+ // One exception to the rule that we don't have float64 literals is at the beginning
+ // of an expression (or a variable name). Since we know we started with an integer
+ // which can't obviously be a variable name, we can check whether the first number
+ // is followed by dot (and then a number again). If so we're converting it to a float64.
+
+ if p.Match(TokenSymbol, ".") != nil {
+ // float64
+ t2 := p.MatchType(TokenNumber)
+ if t2 == nil {
+ return nil, p.Error("Expected a number after the '.'.", nil)
+ }
+ f, err := strconv.ParseFloat(fmt.Sprintf("%s.%s", t.Val, t2.Val), 64)
+ if err != nil {
+ return nil, p.Error(err.Error(), t)
+ }
+ fr := &floatResolver{
+ locationToken: t,
+ val: f,
+ }
+ return fr, nil
+ }
+ i, err := strconv.Atoi(t.Val)
+ if err != nil {
+ return nil, p.Error(err.Error(), t)
+ }
+ nr := &intResolver{
+ locationToken: t,
+ val: i,
+ }
+ return nr, nil
+
+ case TokenString:
+ p.Consume()
+ sr := &stringResolver{
+ locationToken: t,
+ val: t.Val,
+ }
+ return sr, nil
+ case TokenKeyword:
+ p.Consume()
+ switch t.Val {
+ case "true":
+ br := &boolResolver{
+ locationToken: t,
+ val: true,
+ }
+ return br, nil
+ case "false":
+ br := &boolResolver{
+ locationToken: t,
+ val: false,
+ }
+ return br, nil
+ default:
+ return nil, p.Error("This keyword is not allowed here.", nil)
+ }
+ }
+
+ resolver := &variableResolver{
+ locationToken: t,
+ }
+
+ // First part of a variable MUST be an identifier
+ if t.Typ != TokenIdentifier {
+ return nil, p.Error("Expected either a number, string, keyword or identifier.", t)
+ }
+
+ resolver.parts = append(resolver.parts, &variablePart{
+ typ: varTypeIdent,
+ s: t.Val,
+ })
+
+ p.Consume() // we consumed the first identifier of the variable name
+
+variableLoop:
+ for p.Remaining() > 0 {
+ t = p.Current()
+
+ if p.Match(TokenSymbol, ".") != nil {
+ // Next variable part (can be either NUMBER or IDENT)
+ t2 := p.Current()
+ if t2 != nil {
+ switch t2.Typ {
+ case TokenIdentifier:
+ resolver.parts = append(resolver.parts, &variablePart{
+ typ: varTypeIdent,
+ s: t2.Val,
+ })
+ p.Consume() // consume: IDENT
+ continue variableLoop
+ case TokenNumber:
+ i, err := strconv.Atoi(t2.Val)
+ if err != nil {
+ return nil, p.Error(err.Error(), t2)
+ }
+ resolver.parts = append(resolver.parts, &variablePart{
+ typ: varTypeInt,
+ i: i,
+ })
+ p.Consume() // consume: NUMBER
+ continue variableLoop
+ default:
+ return nil, p.Error("This token is not allowed within a variable name.", t2)
+ }
+ } else {
+ // EOF
+ return nil, p.Error("Unexpected EOF, expected either IDENTIFIER or NUMBER after DOT.",
+ p.lastToken)
+ }
+ } else if p.Match(TokenSymbol, "(") != nil {
+ // Function call
+ // FunctionName '(' Comma-separated list of expressions ')'
+ part := resolver.parts[len(resolver.parts)-1]
+ part.isFunctionCall = true
+ argumentLoop:
+ for {
+ if p.Remaining() == 0 {
+ return nil, p.Error("Unexpected EOF, expected function call argument list.", p.lastToken)
+ }
+
+ if p.Peek(TokenSymbol, ")") == nil {
+ // No closing bracket, so we're parsing an expression
+ exprArg, err := p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ part.callingArgs = append(part.callingArgs, exprArg)
+
+ if p.Match(TokenSymbol, ")") != nil {
+ // If there's a closing bracket after an expression, we will stop parsing the arguments
+ break argumentLoop
+ } else {
+ // If there's NO closing bracket, there MUST be a comma
+ if p.Match(TokenSymbol, ",") == nil {
+ return nil, p.Error("Missing comma or closing bracket after argument.", nil)
+ }
+ }
+ } else {
+ // We got a closing bracket, so stop parsing arguments
+ p.Consume()
+ break argumentLoop
+ }
+
+ }
+ // We're done parsing the function call, next variable part
+ continue variableLoop
+ }
+
+ // No dot or function call? Then we're done with the variable parsing
+ break
+ }
+
+ return resolver, nil
+}
+
+func (p *Parser) parseVariableOrLiteralWithFilter() (*nodeFilteredVariable, *Error) {
+ v := &nodeFilteredVariable{
+ locationToken: p.Current(),
+ }
+
+ // Parse the variable name
+ resolver, err := p.parseVariableOrLiteral()
+ if err != nil {
+ return nil, err
+ }
+ v.resolver = resolver
+
+ // Parse all the filters
+filterLoop:
+ for p.Match(TokenSymbol, "|") != nil {
+ // Parse one single filter
+ filter, err := p.parseFilter()
+ if err != nil {
+ return nil, err
+ }
+
+ // Check sandbox filter restriction
+ if _, isBanned := p.template.set.bannedFilters[filter.name]; isBanned {
+ return nil, p.Error(fmt.Sprintf("Usage of filter '%s' is not allowed (sandbox restriction active).", filter.name), nil)
+ }
+
+ v.filterChain = append(v.filterChain, filter)
+
+ continue filterLoop
+ }
+
+ return v, nil
+}
+
+func (p *Parser) parseVariableElement() (INode, *Error) {
+ node := &nodeVariable{
+ locationToken: p.Current(),
+ }
+
+ p.Consume() // consume '{{'
+
+ expr, err := p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ node.expr = expr
+
+ if p.Match(TokenSymbol, "}}") == nil {
+ return nil, p.Error("'}}' expected", nil)
+ }
+
+ return node, nil
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig
new file mode 100644
index 0000000..ba49e3c
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.editorconfig
@@ -0,0 +1,5 @@
+root = true
+
+[*]
+indent_style = tab
+indent_size = 4
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore
new file mode 100644
index 0000000..4cd0cba
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.gitignore
@@ -0,0 +1,6 @@
+# Setup a Global .gitignore for OS and editor generated files:
+# https://help.github.com/articles/ignoring-files
+# git config --global core.excludesfile ~/.gitignore_global
+
+.vagrant
+*.sublime-project
diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml
new file mode 100644
index 0000000..981d1bb
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.travis.yml
@@ -0,0 +1,30 @@
+sudo: false
+language: go
+
+go:
+ - 1.8.x
+ - 1.9.x
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+ fast_finish: true
+
+before_script:
+ - go get -u github.com/golang/lint/golint
+
+script:
+ - go test -v --race ./...
+
+after_script:
+ - test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
+ - test -z "$(golint ./... | tee /dev/stderr)"
+ - go vet ./...
+
+os:
+ - linux
+ - osx
+
+notifications:
+ email: false
diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS
new file mode 100644
index 0000000..5ab5d41
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS
@@ -0,0 +1,52 @@
+# Names should be added to this file as
+# Name or Organization
+# The email address is not required for organizations.
+
+# You can update this list using the following command:
+#
+# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
+
+# Please keep the list sorted.
+
+Aaron L
+Adrien Bustany
+Amit Krishnan
+Anmol Sethi
+Bjørn Erik Pedersen
+Bruno Bigras
+Caleb Spare
+Case Nelson
+Chris Howey
+Christoffer Buchholz
+Daniel Wagner-Hall
+Dave Cheney
+Evan Phoenix
+Francisco Souza
+Hari haran
+John C Barstow
+Kelvin Fo
+Ken-ichirou MATSUZAWA
+Matt Layher
+Nathan Youngman
+Nickolai Zeldovich
+Patrick
+Paul Hammond
+Pawel Knap
+Pieter Droogendijk
+Pursuit92
+Riku Voipio
+Rob Figueiredo
+Rodrigo Chiossi
+Slawek Ligus
+Soge Zhang
+Tiffany Jernigan
+Tilak Sharma
+Tom Payne
+Travis Cline
+Tudor Golubenco
+Vahe Khachikyan
+Yukang
+bronze1man
+debrando
+henrikedwards
+铁哥
diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
new file mode 100644
index 0000000..be4d7ea
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
@@ -0,0 +1,317 @@
+# Changelog
+
+## v1.4.7 / 2018-01-09
+
+* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
+* Tests: Fix missing verb on format string (thanks @rchiossi)
+* Linux: Fix deadlock in Remove (thanks @aarondl)
+* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
+* Docs: Moved FAQ into the README (thanks @vahe)
+* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
+* Docs: replace references to OS X with macOS
+
+## v1.4.2 / 2016-10-10
+
+* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
+
+## v1.4.1 / 2016-10-04
+
+* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
+
+## v1.4.0 / 2016-10-01
+
+* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
+
+## v1.3.1 / 2016-06-28
+
+* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
+
+## v1.3.0 / 2016-04-19
+
+* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
+
+## v1.2.10 / 2016-03-02
+
+* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
+
+## v1.2.9 / 2016-01-13
+
+kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
+
+## v1.2.8 / 2015-12-17
+
+* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
+* inotify: fix race in test
+* enable race detection for continuous integration (Linux, Mac, Windows)
+
+## v1.2.5 / 2015-10-17
+
+* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
+* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
+* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
+* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
+
+## v1.2.1 / 2015-10-14
+
+* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
+
+## v1.2.0 / 2015-02-08
+
+* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
+* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
+* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
+
+## v1.1.1 / 2015-02-05
+
+* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
+
+## v1.1.0 / 2014-12-12
+
+* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
+ * add low-level functions
+ * only need to store flags on directories
+ * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
+ * done can be an unbuffered channel
+ * remove calls to os.NewSyscallError
+* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
+* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## v1.0.4 / 2014-09-07
+
+* kqueue: add dragonfly to the build tags.
+* Rename source code files, rearrange code so exported APIs are at the top.
+* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
+
+## v1.0.3 / 2014-08-19
+
+* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
+
+## v1.0.2 / 2014-08-17
+
+* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+* [Fix] Make ./path and path equivalent. (thanks @zhsso)
+
+## v1.0.0 / 2014-08-15
+
+* [API] Remove AddWatch on Windows, use Add.
+* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
+* Minor updates based on feedback from golint.
+
+## dev / 2014-07-09
+
+* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
+* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
+
+## dev / 2014-07-04
+
+* kqueue: fix incorrect mutex used in Close()
+* Update example to demonstrate usage of Op.
+
+## dev / 2014-06-28
+
+* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
+* Fix for String() method on Event (thanks Alex Brainman)
+* Don't build on Plan 9 or Solaris (thanks @4ad)
+
+## dev / 2014-06-21
+
+* Events channel of type Event rather than *Event.
+* [internal] use syscall constants directly for inotify and kqueue.
+* [internal] kqueue: rename events to kevents and fileEvent to event.
+
+## dev / 2014-06-19
+
+* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
+* [internal] remove cookie from Event struct (unused).
+* [internal] Event struct has the same definition across every OS.
+* [internal] remove internal watch and removeWatch methods.
+
+## dev / 2014-06-12
+
+* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
+* [API] Pluralized channel names: Events and Errors.
+* [API] Renamed FileEvent struct to Event.
+* [API] Op constants replace methods like IsCreate().
+
+## dev / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## dev / 2014-05-23
+
+* [API] Remove current implementation of WatchFlags.
+ * current implementation doesn't take advantage of OS for efficiency
+ * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
+ * no tests for the current implementation
+ * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
+
+## v0.9.3 / 2014-12-31
+
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## v0.9.2 / 2014-08-17
+
+* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+
+## v0.9.1 / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## v0.9.0 / 2014-01-17
+
+* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
+* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
+* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
+
+## v0.8.12 / 2013-11-13
+
+* [API] Remove FD_SET and friends from Linux adapter
+
+## v0.8.11 / 2013-11-02
+
+* [Doc] Add Changelog [#72][] (thanks @nathany)
+* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
+
+## v0.8.10 / 2013-10-19
+
+* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
+* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
+* [Doc] specify OS-specific limits in README (thanks @debrando)
+
+## v0.8.9 / 2013-09-08
+
+* [Doc] Contributing (thanks @nathany)
+* [Doc] update package path in example code [#63][] (thanks @paulhammond)
+* [Doc] GoCI badge in README (Linux only) [#60][]
+* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
+
+## v0.8.8 / 2013-06-17
+
+* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
+
+## v0.8.7 / 2013-06-03
+
+* [API] Make syscall flags internal
+* [Fix] inotify: ignore event changes
+* [Fix] race in symlink test [#45][] (reported by @srid)
+* [Fix] tests on Windows
+* lower case error messages
+
+## v0.8.6 / 2013-05-23
+
+* kqueue: Use EVT_ONLY flag on Darwin
+* [Doc] Update README with full example
+
+## v0.8.5 / 2013-05-09
+
+* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
+
+## v0.8.4 / 2013-04-07
+
+* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
+
+## v0.8.3 / 2013-03-13
+
+* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
+* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
+
+## v0.8.2 / 2013-02-07
+
+* [Doc] add Authors
+* [Fix] fix data races for map access [#29][] (thanks @fsouza)
+
+## v0.8.1 / 2013-01-09
+
+* [Fix] Windows path separators
+* [Doc] BSD License
+
+## v0.8.0 / 2012-11-09
+
+* kqueue: directory watching improvements (thanks @vmirage)
+* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
+* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
+
+## v0.7.4 / 2012-10-09
+
+* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
+* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
+* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
+* [Fix] kqueue: modify after recreation of file
+
+## v0.7.3 / 2012-09-27
+
+* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
+* [Fix] kqueue: no longer get duplicate CREATE events
+
+## v0.7.2 / 2012-09-01
+
+* kqueue: events for created directories
+
+## v0.7.1 / 2012-07-14
+
+* [Fix] for renaming files
+
+## v0.7.0 / 2012-07-02
+
+* [Feature] FSNotify flags
+* [Fix] inotify: Added file name back to event path
+
+## v0.6.0 / 2012-06-06
+
+* kqueue: watch files after directory created (thanks @tmc)
+
+## v0.5.1 / 2012-05-22
+
+* [Fix] inotify: remove all watches before Close()
+
+## v0.5.0 / 2012-05-03
+
+* [API] kqueue: return errors during watch instead of sending over channel
+* kqueue: match symlink behavior on Linux
+* inotify: add `DELETE_SELF` (requested by @taralx)
+* [Fix] kqueue: handle EINTR (reported by @robfig)
+* [Doc] Godoc example [#1][] (thanks @davecheney)
+
+## v0.4.0 / 2012-03-30
+
+* Go 1 released: build with go tool
+* [Feature] Windows support using winfsnotify
+* Windows does not have attribute change notifications
+* Roll attribute notifications into IsModify
+
+## v0.3.0 / 2012-02-19
+
+* kqueue: add files when watch directory
+
+## v0.2.0 / 2011-12-30
+
+* update to latest Go weekly code
+
+## v0.1.0 / 2011-10-19
+
+* kqueue: add watch on file creation to match inotify
+* kqueue: create file event
+* inotify: ignore `IN_IGNORED` events
+* event String()
+* linux: common FileEvent functions
+* initial commit
+
+[#79]: https://github.com/howeyc/fsnotify/pull/79
+[#77]: https://github.com/howeyc/fsnotify/pull/77
+[#72]: https://github.com/howeyc/fsnotify/issues/72
+[#71]: https://github.com/howeyc/fsnotify/issues/71
+[#70]: https://github.com/howeyc/fsnotify/issues/70
+[#63]: https://github.com/howeyc/fsnotify/issues/63
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#60]: https://github.com/howeyc/fsnotify/issues/60
+[#59]: https://github.com/howeyc/fsnotify/issues/59
+[#49]: https://github.com/howeyc/fsnotify/issues/49
+[#45]: https://github.com/howeyc/fsnotify/issues/45
+[#40]: https://github.com/howeyc/fsnotify/issues/40
+[#36]: https://github.com/howeyc/fsnotify/issues/36
+[#33]: https://github.com/howeyc/fsnotify/issues/33
+[#29]: https://github.com/howeyc/fsnotify/issues/29
+[#25]: https://github.com/howeyc/fsnotify/issues/25
+[#24]: https://github.com/howeyc/fsnotify/issues/24
+[#21]: https://github.com/howeyc/fsnotify/issues/21
diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
new file mode 100644
index 0000000..828a60b
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
@@ -0,0 +1,77 @@
+# Contributing
+
+## Issues
+
+* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
+* Please indicate the platform you are using fsnotify on.
+* A code example to reproduce the problem is appreciated.
+
+## Pull Requests
+
+### Contributor License Agreement
+
+fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
+
+Please indicate that you have signed the CLA in your pull request.
+
+### How fsnotify is Developed
+
+* Development is done on feature branches.
+* Tests are run on BSD, Linux, macOS and Windows.
+* Pull requests are reviewed and [applied to master][am] using [hub][].
+ * Maintainers may modify or squash commits rather than asking contributors to.
+* To issue a new release, the maintainers will:
+ * Update the CHANGELOG
+ * Tag a version, which will become available through gopkg.in.
+
+### How to Fork
+
+For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
+
+1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Ensure everything works and the tests pass (see below)
+4. Commit your changes (`git commit -am 'Add some feature'`)
+
+Contribute upstream:
+
+1. Fork fsnotify on GitHub
+2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
+3. Push to the branch (`git push fork my-new-feature`)
+4. Create a new Pull Request on GitHub
+
+This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
+
+### Testing
+
+fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
+
+Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
+
+To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
+
+* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
+* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
+* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
+* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
+* When you're done, you will want to halt or destroy the Vagrant boxes.
+
+Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
+
+Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
+
+### Maintainers
+
+Help maintaining fsnotify is welcome. To be a maintainer:
+
+* Submit a pull request and sign the CLA as above.
+* You must be able to run the test suite on Mac, Windows, Linux and BSD.
+
+To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
+
+All code changes should be internal pull requests.
+
+Releases are tagged using [Semantic Versioning](http://semver.org/).
+
+[hub]: https://github.com/github/hub
+[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE
new file mode 100644
index 0000000..f21e540
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2012 fsnotify Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md
new file mode 100644
index 0000000..3993207
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/README.md
@@ -0,0 +1,79 @@
+# File system notifications for Go
+
+[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
+
+fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
+
+```console
+go get -u golang.org/x/sys/...
+```
+
+Cross platform: Windows, Linux, BSD and macOS.
+
+|Adapter |OS |Status |
+|----------|----------|----------|
+|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
+|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
+|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
+|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
+|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
+|fanotify |Linux 2.6.37+ | |
+|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
+|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)|
+
+\* Android and iOS are untested.
+
+Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
+
+## API stability
+
+fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
+
+All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
+
+Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
+
+## Contributing
+
+Please refer to [CONTRIBUTING][] before opening an issue or pull request.
+
+## Example
+
+See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
+
+## FAQ
+
+**When a file is moved to another directory is it still being watched?**
+
+No (it shouldn't be, unless you are watching where it was moved to).
+
+**When I watch a directory, are all subdirectories watched as well?**
+
+No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
+
+**Do I have to watch the Error and Event channels in a separate goroutine?**
+
+As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
+
+**Why am I receiving multiple events for the same file on OS X?**
+
+Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
+
+**How many files can be watched at once?**
+
+There are OS-specific limits as to how many watches can be created:
+* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
+* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
+
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#18]: https://github.com/fsnotify/fsnotify/issues/18
+[#11]: https://github.com/fsnotify/fsnotify/issues/11
+[#7]: https://github.com/howeyc/fsnotify/issues/7
+
+[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
+
+## Related Projects
+
+* [notify](https://github.com/rjeczalik/notify)
+* [fsevents](https://github.com/fsnotify/fsevents)
+
diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go
new file mode 100644
index 0000000..ced39cb
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fen.go
@@ -0,0 +1,37 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build solaris
+
+package fsnotify
+
+import (
+ "errors"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+// (Solaris/FEN stub: the channels are declared for API compatibility but
+// are never populated — see NewWatcher below.)
+type Watcher struct {
+ Events chan Event // Delivers file system events.
+ Errors chan error // Delivers errors encountered while watching.
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+// The Solaris FEN backend is not implemented yet, so this always returns a
+// nil Watcher together with a non-nil error.
+func NewWatcher() (*Watcher, error) {
+ return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
+}
+
+// Close removes all watches and closes the events channel.
+// Stub implementation: there is nothing to release, so it always returns nil.
+func (w *Watcher) Close() error {
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+// Stub implementation: a no-op that always returns nil.
+func (w *Watcher) Add(name string) error {
+ return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+// Stub implementation: a no-op that always returns nil.
+func (w *Watcher) Remove(name string) error {
+ return nil
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
new file mode 100644
index 0000000..190bf0d
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -0,0 +1,66 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+// Package fsnotify provides a platform-independent interface for file system notifications.
+package fsnotify
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+// Event represents a single file system notification.
+type Event struct {
+ Name string // Relative path to the file or directory.
+ Op Op // File operation that triggered the event.
+}
+
+// Op describes a set of file operations.
+type Op uint32
+
+// These are the generalized file operations that can trigger a notification.
+// The values are bit flags, so several operations may be combined in one Op.
+const (
+ Create Op = 1 << iota
+ Write
+ Remove
+ Rename
+ Chmod
+)
+
+// String returns a pipe-separated list of the operation flags set in op
+// (e.g. "CREATE|WRITE"), or the empty string when no flags are set.
+func (op Op) String() string {
+ // Use a buffer for efficient string concatenation
+ var buffer bytes.Buffer
+
+ if op&Create == Create {
+ buffer.WriteString("|CREATE")
+ }
+ if op&Remove == Remove {
+ buffer.WriteString("|REMOVE")
+ }
+ if op&Write == Write {
+ buffer.WriteString("|WRITE")
+ }
+ if op&Rename == Rename {
+ buffer.WriteString("|RENAME")
+ }
+ if op&Chmod == Chmod {
+ buffer.WriteString("|CHMOD")
+ }
+ if buffer.Len() == 0 {
+ return ""
+ }
+ return buffer.String()[1:] // Strip leading pipe
+}
+
+// String returns a string representation of the event in the form
+// "file: REMOVE|WRITE|..." — the name is quoted via %q so empty or
+// unusual paths remain visible in logs.
+func (e Event) String() string {
+ return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
+}
+
+// Common errors that can be reported by a watcher
+var ErrEventOverflow = errors.New("fsnotify queue overflow")
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go
new file mode 100644
index 0000000..d9fd1b8
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/inotify.go
@@ -0,0 +1,337 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ mu sync.Mutex // Map access
+ fd int
+ poller *fdPoller
+ watches map[string]*watch // Map of inotify watches (key: path)
+ paths map[int]string // Map of watched paths (key: watch descriptor)
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
+ doneResp chan struct{} // Channel to respond to Close
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ // Create inotify fd
+ fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
+ if fd == -1 {
+ return nil, errno
+ }
+ // Create epoll
+ poller, err := newFdPoller(fd)
+ if err != nil {
+ unix.Close(fd)
+ return nil, err
+ }
+ w := &Watcher{
+ fd: fd,
+ poller: poller,
+ watches: make(map[string]*watch),
+ paths: make(map[int]string),
+ Events: make(chan Event),
+ Errors: make(chan error),
+ done: make(chan struct{}),
+ doneResp: make(chan struct{}),
+ }
+
+ go w.readEvents()
+ return w, nil
+}
+
+func (w *Watcher) isClosed() bool {
+ select {
+ case <-w.done:
+ return true
+ default:
+ return false
+ }
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ if w.isClosed() {
+ return nil
+ }
+
+ // Send 'close' signal to goroutine, and set the Watcher to closed.
+ close(w.done)
+
+ // Wake up goroutine
+ w.poller.wake()
+
+ // Wait for goroutine to close
+ <-w.doneResp
+
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ name = filepath.Clean(name)
+ if w.isClosed() {
+ return errors.New("inotify instance already closed")
+ }
+
+ const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
+ unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
+ unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
+
+ var flags uint32 = agnosticEvents
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ watchEntry := w.watches[name]
+ if watchEntry != nil {
+ flags |= watchEntry.flags | unix.IN_MASK_ADD
+ }
+ wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
+ if wd == -1 {
+ return errno
+ }
+
+ if watchEntry == nil {
+ w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+ w.paths[wd] = name
+ } else {
+ watchEntry.wd = uint32(wd)
+ watchEntry.flags = flags
+ }
+
+ return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ name = filepath.Clean(name)
+
+ // Fetch the watch.
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ watch, ok := w.watches[name]
+
+ // Remove it from inotify.
+ if !ok {
+ return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
+ }
+
+ // We successfully removed the watch if InotifyRmWatch doesn't return an
+ // error, we need to clean up our internal state to ensure it matches
+ // inotify's kernel state.
+ delete(w.paths, int(watch.wd))
+ delete(w.watches, name)
+
+ // inotify_rm_watch will return EINVAL if the file has been deleted;
+ // the inotify will already have been removed.
+ // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously
+ // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
+ // so that EINVAL means that the wd is being rm_watch()ed or its file removed
+ // by another thread and we have not received IN_IGNORE event.
+ success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
+ if success == -1 {
+ // TODO: Perhaps it's not helpful to return an error here in every case.
+ // the only two possible errors are:
+ // EBADF, which happens when w.fd is not a valid file descriptor of any kind.
+ // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
+ // Watch descriptors are invalidated when they are removed explicitly or implicitly;
+ // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
+ return errno
+ }
+
+ return nil
+}
+
+type watch struct {
+ wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+ flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+}
+
+// readEvents reads from the inotify file descriptor, converts the
+// received events into Event objects and sends them via the Events channel
+func (w *Watcher) readEvents() {
+ var (
+ buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
+ n int // Number of bytes read with read()
+ errno error // Syscall errno
+ ok bool // For poller.wait
+ )
+
+ defer close(w.doneResp)
+ defer close(w.Errors)
+ defer close(w.Events)
+ defer unix.Close(w.fd)
+ defer w.poller.close()
+
+ for {
+ // See if we have been closed.
+ if w.isClosed() {
+ return
+ }
+
+ ok, errno = w.poller.wait()
+ if errno != nil {
+ select {
+ case w.Errors <- errno:
+ case <-w.done:
+ return
+ }
+ continue
+ }
+
+ if !ok {
+ continue
+ }
+
+ n, errno = unix.Read(w.fd, buf[:])
+ // If a signal interrupted execution, see if we've been asked to close, and try again.
+ // http://man7.org/linux/man-pages/man7/signal.7.html :
+ // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
+ if errno == unix.EINTR {
+ continue
+ }
+
+ // unix.Read might have been woken up by Close. If so, we're done.
+ if w.isClosed() {
+ return
+ }
+
+ if n < unix.SizeofInotifyEvent {
+ var err error
+ if n == 0 {
+ // If EOF is received. This should really never happen.
+ err = io.EOF
+ } else if n < 0 {
+ // If an error occurred while reading.
+ err = errno
+ } else {
+ // Read was too short.
+ err = errors.New("notify: short read in readEvents()")
+ }
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ return
+ }
+ continue
+ }
+
+ var offset uint32
+ // We don't know how many events we just read into the buffer
+ // While the offset points to at least one whole event...
+ for offset <= uint32(n-unix.SizeofInotifyEvent) {
+ // Point "raw" to the event in the buffer
+ raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+
+ mask := uint32(raw.Mask)
+ nameLen := uint32(raw.Len)
+
+ if mask&unix.IN_Q_OVERFLOW != 0 {
+ select {
+ case w.Errors <- ErrEventOverflow:
+ case <-w.done:
+ return
+ }
+ }
+
+ // If the event happened to the watched directory or the watched file, the kernel
+ // doesn't append the filename to the event, but we would like to always fill the
+ // the "Name" field with a valid filename. We retrieve the path of the watch from
+ // the "paths" map.
+ w.mu.Lock()
+ name, ok := w.paths[int(raw.Wd)]
+ // IN_DELETE_SELF occurs when the file/directory being watched is removed.
+ // This is a sign to clean up the maps, otherwise we are no longer in sync
+ // with the inotify kernel state which has already deleted the watch
+ // automatically.
+ if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+ delete(w.paths, int(raw.Wd))
+ delete(w.watches, name)
+ }
+ w.mu.Unlock()
+
+ if nameLen > 0 {
+ // Point "bytes" at the first byte of the filename
+ bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
+ // The filename is padded with NULL bytes. TrimRight() gets rid of those.
+ name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+ }
+
+ event := newEvent(name, mask)
+
+ // Send the events that are not ignored on the events channel
+ if !event.ignoreLinux(mask) {
+ select {
+ case w.Events <- event:
+ case <-w.done:
+ return
+ }
+ }
+
+ // Move to the next event in the buffer
+ offset += unix.SizeofInotifyEvent + nameLen
+ }
+ }
+}
+
+// Certain types of events can be "ignored" and not sent over the Events
+// channel. Such as events marked ignore by the kernel, or MODIFY events
+// against files that do not exist.
+func (e *Event) ignoreLinux(mask uint32) bool {
+ // Ignore anything the inotify API says to ignore
+ if mask&unix.IN_IGNORED == unix.IN_IGNORED {
+ return true
+ }
+
+ // If the event is not a DELETE or RENAME, the file must exist.
+ // Otherwise the event is ignored.
+ // *Note*: this was put in place because it was seen that a MODIFY
+ // event was sent after the DELETE. This ignores that MODIFY and
+ // assumes a DELETE will come or has come if the file doesn't exist.
+ if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
+ _, statErr := os.Lstat(e.Name)
+ return os.IsNotExist(statErr)
+ }
+ return false
+}
+
+// newEvent returns an platform-independent Event based on an inotify mask.
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
+ e.Op |= Create
+ }
+ if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
+ e.Op |= Remove
+ }
+ if mask&unix.IN_MODIFY == unix.IN_MODIFY {
+ e.Op |= Write
+ }
+ if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
+ e.Op |= Rename
+ }
+ if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
new file mode 100644
index 0000000..cc7db4b
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
@@ -0,0 +1,187 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "errors"
+
+ "golang.org/x/sys/unix"
+)
+
+type fdPoller struct {
+ fd int // File descriptor (as returned by the inotify_init() syscall)
+ epfd int // Epoll file descriptor
+ pipe [2]int // Pipe for waking up
+}
+
+func emptyPoller(fd int) *fdPoller {
+ poller := new(fdPoller)
+ poller.fd = fd
+ poller.epfd = -1
+ poller.pipe[0] = -1
+ poller.pipe[1] = -1
+ return poller
+}
+
+// Create a new inotify poller.
+// This creates an inotify handler, and an epoll handler.
+func newFdPoller(fd int) (*fdPoller, error) {
+ var errno error
+ poller := emptyPoller(fd)
+ defer func() {
+ if errno != nil {
+ poller.close()
+ }
+ }()
+ poller.fd = fd
+
+ // Create epoll fd
+ poller.epfd, errno = unix.EpollCreate1(0)
+ if poller.epfd == -1 {
+ return nil, errno
+ }
+ // Create pipe; pipe[0] is the read end, pipe[1] the write end.
+ errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
+ if errno != nil {
+ return nil, errno
+ }
+
+ // Register inotify fd with epoll
+ event := unix.EpollEvent{
+ Fd: int32(poller.fd),
+ Events: unix.EPOLLIN,
+ }
+ errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
+ if errno != nil {
+ return nil, errno
+ }
+
+ // Register pipe fd with epoll
+ event = unix.EpollEvent{
+ Fd: int32(poller.pipe[0]),
+ Events: unix.EPOLLIN,
+ }
+ errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
+ if errno != nil {
+ return nil, errno
+ }
+
+ return poller, nil
+}
+
+// Wait using epoll.
+// Returns true if something is ready to be read,
+// false if there is not.
+func (poller *fdPoller) wait() (bool, error) {
+ // 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
+ // I don't know whether epoll_wait returns the number of events returned,
+ // or the total number of events ready.
+ // I decided to catch both by making the buffer one larger than the maximum.
+ events := make([]unix.EpollEvent, 7)
+ for {
+ n, errno := unix.EpollWait(poller.epfd, events, -1)
+ if n == -1 {
+ if errno == unix.EINTR {
+ continue
+ }
+ return false, errno
+ }
+ if n == 0 {
+ // If there are no events, try again.
+ continue
+ }
+ if n > 6 {
+ // This should never happen. More events were returned than should be possible.
+ return false, errors.New("epoll_wait returned more events than I know what to do with")
+ }
+ ready := events[:n]
+ epollhup := false
+ epollerr := false
+ epollin := false
+ for _, event := range ready {
+ if event.Fd == int32(poller.fd) {
+ if event.Events&unix.EPOLLHUP != 0 {
+ // This should not happen, but if it does, treat it as a wakeup.
+ epollhup = true
+ }
+ if event.Events&unix.EPOLLERR != 0 {
+ // If an error is waiting on the file descriptor, we should pretend
+ // something is ready to read, and let unix.Read pick up the error.
+ epollerr = true
+ }
+ if event.Events&unix.EPOLLIN != 0 {
+ // There is data to read.
+ epollin = true
+ }
+ }
+ if event.Fd == int32(poller.pipe[0]) {
+ if event.Events&unix.EPOLLHUP != 0 {
+ // Write pipe descriptor was closed, by us. This means we're closing down the
+ // watcher, and we should wake up.
+ }
+ if event.Events&unix.EPOLLERR != 0 {
+ // If an error is waiting on the pipe file descriptor.
+ // This is an absolute mystery, and should never ever happen.
+ return false, errors.New("Error on the pipe descriptor.")
+ }
+ if event.Events&unix.EPOLLIN != 0 {
+ // This is a regular wakeup, so we have to clear the buffer.
+ err := poller.clearWake()
+ if err != nil {
+ return false, err
+ }
+ }
+ }
+ }
+
+ if epollhup || epollerr || epollin {
+ return true, nil
+ }
+ return false, nil
+ }
+}
+
+// Close the write end of the poller.
+func (poller *fdPoller) wake() error {
+ buf := make([]byte, 1)
+ n, errno := unix.Write(poller.pipe[1], buf)
+ if n == -1 {
+ if errno == unix.EAGAIN {
+ // Buffer is full, poller will wake.
+ return nil
+ }
+ return errno
+ }
+ return nil
+}
+
+func (poller *fdPoller) clearWake() error {
+ // You have to be woken up a LOT in order to get to 100!
+ buf := make([]byte, 100)
+ n, errno := unix.Read(poller.pipe[0], buf)
+ if n == -1 {
+ if errno == unix.EAGAIN {
+ // Buffer is empty, someone else cleared our wake.
+ return nil
+ }
+ return errno
+ }
+ return nil
+}
+
+// Close all poller file descriptors, but not the one passed to it.
+func (poller *fdPoller) close() {
+ if poller.pipe[1] != -1 {
+ unix.Close(poller.pipe[1])
+ }
+ if poller.pipe[0] != -1 {
+ unix.Close(poller.pipe[0])
+ }
+ if poller.epfd != -1 {
+ unix.Close(poller.epfd)
+ }
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go
new file mode 100644
index 0000000..86e76a3
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go
@@ -0,0 +1,521 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly darwin
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
+
+ kq int // File descriptor (as returned by the kqueue() syscall).
+
+ mu sync.Mutex // Protects access to watcher data
+ watches map[string]int // Map of watched file descriptors (key: path).
+ externalWatches map[string]bool // Map of watches added by user of the library.
+ dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
+ paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
+ fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events).
+ isClosed bool // Set to true when Close() is first called
+}
+
+type pathInfo struct {
+ name string
+ isDir bool
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ kq, err := kqueue()
+ if err != nil {
+ return nil, err
+ }
+
+ w := &Watcher{
+ kq: kq,
+ watches: make(map[string]int),
+ dirFlags: make(map[string]uint32),
+ paths: make(map[int]pathInfo),
+ fileExists: make(map[string]bool),
+ externalWatches: make(map[string]bool),
+ Events: make(chan Event),
+ Errors: make(chan error),
+ done: make(chan struct{}),
+ }
+
+ go w.readEvents()
+ return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return nil
+ }
+ w.isClosed = true
+
+ // copy paths to remove while locked
+ var pathsToRemove = make([]string, 0, len(w.watches))
+ for name := range w.watches {
+ pathsToRemove = append(pathsToRemove, name)
+ }
+ w.mu.Unlock()
+ // unlock before calling Remove, which also locks
+
+ for _, name := range pathsToRemove {
+ w.Remove(name)
+ }
+
+ // send a "quit" message to the reader goroutine
+ close(w.done)
+
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ w.mu.Lock()
+ w.externalWatches[name] = true
+ w.mu.Unlock()
+ _, err := w.addWatch(name, noteAllEvents)
+ return err
+}
+
+// Remove stops watching the the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ name = filepath.Clean(name)
+ w.mu.Lock()
+ watchfd, ok := w.watches[name]
+ w.mu.Unlock()
+ if !ok {
+ return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
+ }
+
+ const registerRemove = unix.EV_DELETE
+ if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
+ return err
+ }
+
+ unix.Close(watchfd)
+
+ w.mu.Lock()
+ isDir := w.paths[watchfd].isDir
+ delete(w.watches, name)
+ delete(w.paths, watchfd)
+ delete(w.dirFlags, name)
+ w.mu.Unlock()
+
+ // Find all watched paths that are in this directory that are not external.
+ if isDir {
+ var pathsToRemove []string
+ w.mu.Lock()
+ for _, path := range w.paths {
+ wdir, _ := filepath.Split(path.name)
+ if filepath.Clean(wdir) == name {
+ if !w.externalWatches[path.name] {
+ pathsToRemove = append(pathsToRemove, path.name)
+ }
+ }
+ }
+ w.mu.Unlock()
+ for _, name := range pathsToRemove {
+ // Since these are internal, not much sense in propagating error
+ // to the user, as that will just confuse them with an error about
+ // a path they did not explicitly watch themselves.
+ w.Remove(name)
+ }
+ }
+
+ return nil
+}
+
+// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
+const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
+
+// keventWaitTime to block on each read from kevent
+var keventWaitTime = durationToTimespec(100 * time.Millisecond)
+
+// addWatch adds name to the watched file set.
+// The flags are interpreted as described in kevent(2).
+// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
+ var isDir bool
+ // Make ./name and name equivalent
+ name = filepath.Clean(name)
+
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return "", errors.New("kevent instance already closed")
+ }
+ watchfd, alreadyWatching := w.watches[name]
+ // We already have a watch, but we can still override flags.
+ if alreadyWatching {
+ isDir = w.paths[watchfd].isDir
+ }
+ w.mu.Unlock()
+
+ if !alreadyWatching {
+ fi, err := os.Lstat(name)
+ if err != nil {
+ return "", err
+ }
+
+ // Don't watch sockets.
+ if fi.Mode()&os.ModeSocket == os.ModeSocket {
+ return "", nil
+ }
+
+ // Don't watch named pipes.
+ if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
+ return "", nil
+ }
+
+ // Follow Symlinks
+ // Unfortunately, Linux can add bogus symlinks to watch list without
+ // issue, and Windows can't do symlinks period (AFAIK). To maintain
+ // consistency, we will act like everything is fine. There will simply
+ // be no file events for broken symlinks.
+ // Hence the returns of nil on errors.
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
+ name, err = filepath.EvalSymlinks(name)
+ if err != nil {
+ return "", nil
+ }
+
+ w.mu.Lock()
+ _, alreadyWatching = w.watches[name]
+ w.mu.Unlock()
+
+ if alreadyWatching {
+ return name, nil
+ }
+
+ fi, err = os.Lstat(name)
+ if err != nil {
+ return "", nil
+ }
+ }
+
+ watchfd, err = unix.Open(name, openMode, 0700)
+ if watchfd == -1 {
+ return "", err
+ }
+
+ isDir = fi.IsDir()
+ }
+
+ const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
+ if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
+ unix.Close(watchfd)
+ return "", err
+ }
+
+ if !alreadyWatching {
+ w.mu.Lock()
+ w.watches[name] = watchfd
+ w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
+ w.mu.Unlock()
+ }
+
+ if isDir {
+ // Watch the directory if it has not been watched before,
+ // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
+ w.mu.Lock()
+
+ watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
+ (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
+ // Store flags so this watch can be updated later
+ w.dirFlags[name] = flags
+ w.mu.Unlock()
+
+ if watchDir {
+ if err := w.watchDirectoryFiles(name); err != nil {
+ return "", err
+ }
+ }
+ }
+ return name, nil
+}
+
+// readEvents reads from kqueue and converts the received kevents into
+// Event values that it sends down the Events channel.
+func (w *Watcher) readEvents() {
+ eventBuffer := make([]unix.Kevent_t, 10)
+
+loop:
+ for {
+ // See if there is a message on the "done" channel
+ select {
+ case <-w.done:
+ break loop
+ default:
+ }
+
+ // Get new events
+ kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
+ // EINTR is okay, the syscall was interrupted before timeout expired.
+ if err != nil && err != unix.EINTR {
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ break loop
+ }
+ continue
+ }
+
+ // Flush the events we received to the Events channel
+ for len(kevents) > 0 {
+ kevent := &kevents[0]
+ watchfd := int(kevent.Ident)
+ mask := uint32(kevent.Fflags)
+ w.mu.Lock()
+ path := w.paths[watchfd]
+ w.mu.Unlock()
+ event := newEvent(path.name, mask)
+
+ if path.isDir && !(event.Op&Remove == Remove) {
+ // Double check to make sure the directory exists. This can happen when
+ // we do a rm -fr on a recursively watched folders and we receive a
+ // modification event first but the folder has been deleted and later
+ // receive the delete event
+ if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
+ // mark is as delete event
+ event.Op |= Remove
+ }
+ }
+
+ if event.Op&Rename == Rename || event.Op&Remove == Remove {
+ w.Remove(event.Name)
+ w.mu.Lock()
+ delete(w.fileExists, event.Name)
+ w.mu.Unlock()
+ }
+
+ if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
+ w.sendDirectoryChangeEvents(event.Name)
+ } else {
+ // Send the event on the Events channel.
+ select {
+ case w.Events <- event:
+ case <-w.done:
+ break loop
+ }
+ }
+
+ if event.Op&Remove == Remove {
+ // Look for a file that may have overwritten this.
+ // For example, mv f1 f2 will delete f2, then create f2.
+ if path.isDir {
+ fileDir := filepath.Clean(event.Name)
+ w.mu.Lock()
+ _, found := w.watches[fileDir]
+ w.mu.Unlock()
+ if found {
+ // make sure the directory exists before we watch for changes. When we
+ // do a recursive watch and perform rm -fr, the parent directory might
+ // have gone missing, ignore the missing directory and let the
+ // upcoming delete event remove the watch from the parent directory.
+ if _, err := os.Lstat(fileDir); err == nil {
+ w.sendDirectoryChangeEvents(fileDir)
+ }
+ }
+ } else {
+ filePath := filepath.Clean(event.Name)
+ if fileInfo, err := os.Lstat(filePath); err == nil {
+ w.sendFileCreatedEventIfNew(filePath, fileInfo)
+ }
+ }
+ }
+
+ // Move to next event
+ kevents = kevents[1:]
+ }
+ }
+
+ // cleanup
+ err := unix.Close(w.kq)
+ if err != nil {
+ // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors.
+ select {
+ case w.Errors <- err:
+ default:
+ }
+ }
+ close(w.Events)
+ close(w.Errors)
+}
+
+// newEvent returns an platform-independent Event based on kqueue Fflags.
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
+ e.Op |= Remove
+ }
+ if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
+ e.Op |= Write
+ }
+ if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
+ e.Op |= Rename
+ }
+ if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
+
+func newCreateEvent(name string) Event {
+ return Event{Name: name, Op: Create}
+}
+
+// watchDirectoryFiles to mimic inotify when adding a watch on a directory
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+ // Get all files
+ files, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ return err
+ }
+
+ for _, fileInfo := range files {
+ filePath := filepath.Join(dirPath, fileInfo.Name())
+ filePath, err = w.internalWatch(filePath, fileInfo)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.fileExists[filePath] = true
+ w.mu.Unlock()
+ }
+
+ return nil
+}
+
+// sendDirectoryEvents searches the directory for newly created files
+// and sends them over the event channel. This functionality is to have
+// the BSD version of fsnotify match Linux inotify which provides a
+// create event for files created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
+ // Get all files
+ files, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ return
+ }
+ }
+
+ // Search for new files
+ for _, fileInfo := range files {
+ filePath := filepath.Join(dirPath, fileInfo.Name())
+ err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
+
+ if err != nil {
+ return
+ }
+ }
+}
+
+// sendFileCreatedEvent sends a create event if the file isn't already being tracked.
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
+ w.mu.Lock()
+ _, doesExist := w.fileExists[filePath]
+ w.mu.Unlock()
+ if !doesExist {
+ // Send create event
+ select {
+ case w.Events <- newCreateEvent(filePath):
+ case <-w.done:
+ return
+ }
+ }
+
+ // like watchDirectoryFiles (but without doing another ReadDir)
+ filePath, err = w.internalWatch(filePath, fileInfo)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.fileExists[filePath] = true
+ w.mu.Unlock()
+
+ return nil
+}
+
+func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
+ if fileInfo.IsDir() {
+ // mimic Linux providing delete events for subdirectories
+ // but preserve the flags used if currently watching subdirectory
+ w.mu.Lock()
+ flags := w.dirFlags[name]
+ w.mu.Unlock()
+
+ flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
+ return w.addWatch(name, flags)
+ }
+
+ // watch file to mimic Linux inotify
+ return w.addWatch(name, noteAllEvents)
+}
+
+// kqueue creates a new kernel event queue and returns a descriptor.
+func kqueue() (kq int, err error) {
+ kq, err = unix.Kqueue()
+ if kq == -1 {
+ return kq, err
+ }
+ return kq, nil
+}
+
+// register events with the queue
+func register(kq int, fds []int, flags int, fflags uint32) error {
+ changes := make([]unix.Kevent_t, len(fds))
+
+ for i, fd := range fds {
+ // SetKevent converts int to the platform-specific types:
+ unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
+ changes[i].Fflags = fflags
+ }
+
+ // register the events
+ success, err := unix.Kevent(kq, changes, nil, nil)
+ if success == -1 {
+ return err
+ }
+ return nil
+}
+
+// read retrieves pending events, or waits until an event occurs.
+// A timeout of nil blocks indefinitely, while 0 polls the queue.
+func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
+ n, err := unix.Kevent(kq, nil, events, timeout)
+ if err != nil {
+ return nil, err
+ }
+ return events[0:n], nil
+}
+
+// durationToTimespec prepares a timeout value
+func durationToTimespec(d time.Duration) unix.Timespec {
+ return unix.NsecToTimespec(d.Nanoseconds())
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
new file mode 100644
index 0000000..7d8de14
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+const openMode = unix.O_NONBLOCK | unix.O_RDONLY
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
new file mode 100644
index 0000000..9139e17
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
@@ -0,0 +1,12 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+// note: this constant is not defined on BSD
+const openMode = unix.O_EVTONLY
diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go
new file mode 100644
index 0000000..09436f3
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/windows.go
@@ -0,0 +1,561 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ isClosed bool // Set to true when Close() is first called
+ mu sync.Mutex // Map access
+ port syscall.Handle // Handle to completion port
+ watches watchMap // Map of watches (key: i-number)
+ input chan *input // Inputs to the reader are sent on this channel
+ quit chan chan<- error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
+ if e != nil {
+ return nil, os.NewSyscallError("CreateIoCompletionPort", e)
+ }
+ w := &Watcher{
+ port: port,
+ watches: make(watchMap),
+ input: make(chan *input, 1),
+ Events: make(chan Event, 50),
+ Errors: make(chan error),
+ quit: make(chan chan<- error, 1),
+ }
+ go w.readEvents()
+ return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ if w.isClosed {
+ return nil
+ }
+ w.isClosed = true // NOTE(review): isClosed is not guarded by w.mu; concurrent Close/Add may race — confirm upstream intent
+
+ // Send "quit" message to the reader goroutine
+ ch := make(chan error)
+ w.quit <- ch
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-ch // the reader goroutine reports any CloseHandle error back on ch after tearing down all watches
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ if w.isClosed {
+ return errors.New("watcher already closed")
+ }
+ in := &input{
+ op: opAddWatch,
+ path: filepath.Clean(name),
+ flags: sysFSALLEVENTS, // subscribe to every event class
+ reply: make(chan error),
+ }
+ w.input <- in // serviced on the I/O thread inside readEvents
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-in.reply
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ in := &input{
+ op: opRemoveWatch,
+ path: filepath.Clean(name),
+ reply: make(chan error),
+ }
+ w.input <- in // serviced on the I/O thread inside readEvents
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-in.reply
+}
+
+const (
+ // Options for AddWatch
+ sysFSONESHOT = 0x80000000
+ sysFSONLYDIR = 0x1000000
+
+ // Events
+ sysFSACCESS = 0x1
+ sysFSALLEVENTS = 0xfff
+ sysFSATTRIB = 0x4
+ sysFSCLOSE = 0x18
+ sysFSCREATE = 0x100
+ sysFSDELETE = 0x200
+ sysFSDELETESELF = 0x400
+ sysFSMODIFY = 0x2
+ sysFSMOVE = 0xc0
+ sysFSMOVEDFROM = 0x40
+ sysFSMOVEDTO = 0x80
+ sysFSMOVESELF = 0x800
+
+ // Special events
+ sysFSIGNORED = 0x8000
+ sysFSQOVERFLOW = 0x4000
+)
+
+func newEvent(name string, mask uint32) Event { // newEvent converts an internal sysFS* bitmask into the portable Event type.
+ e := Event{Name: name}
+ if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
+ e.Op |= Create
+ }
+ if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
+ e.Op |= Remove
+ }
+ if mask&sysFSMODIFY == sysFSMODIFY {
+ e.Op |= Write
+ }
+ if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
+ e.Op |= Rename
+ }
+ if mask&sysFSATTRIB == sysFSATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
+
+const (
+ opAddWatch = iota
+ opRemoveWatch
+)
+
+const (
+ provisional uint64 = 1 << (32 + iota)
+)
+
+type input struct {
+ op int
+ path string
+ flags uint32
+ reply chan error
+}
+
+type inode struct {
+ handle syscall.Handle
+ volume uint32
+ index uint64
+}
+
+type watch struct {
+ ov syscall.Overlapped
+ ino *inode // i-number
+ path string // Directory path
+ mask uint64 // Directory itself is being watched with these notify flags
+ names map[string]uint64 // Map of names being watched and their notify flags
+ rename string // Remembers the old name while renaming a file
+ buf [4096]byte
+}
+
+type indexMap map[uint64]*watch
+type watchMap map[uint32]indexMap
+
+func (w *Watcher) wakeupReader() error { // wakeupReader posts a zero completion packet so readEvents unblocks and services w.input/w.quit.
+ e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
+ if e != nil {
+ return os.NewSyscallError("PostQueuedCompletionStatus", e)
+ }
+ return nil
+}
+
+func getDir(pathname string) (dir string, err error) { // getDir returns pathname itself when it is a directory, otherwise its parent directory.
+ attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
+ if e != nil {
+ return "", os.NewSyscallError("GetFileAttributes", e)
+ }
+ if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
+ dir = pathname
+ } else {
+ dir, _ = filepath.Split(pathname) // watch the containing directory for plain files
+ dir = filepath.Clean(dir)
+ }
+ return
+}
+
+func getIno(path string) (ino *inode, err error) { // getIno opens the directory for overlapped listing and captures its (volume, file index) identity.
+ h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
+ syscall.FILE_LIST_DIRECTORY,
+ syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+ nil, syscall.OPEN_EXISTING,
+ syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
+ if e != nil {
+ return nil, os.NewSyscallError("CreateFile", e)
+ }
+ var fi syscall.ByHandleFileInformation
+ if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
+ syscall.CloseHandle(h) // don't leak the handle on the error path
+ return nil, os.NewSyscallError("GetFileInformationByHandle", e)
+ }
+ ino = &inode{
+ handle: h,
+ volume: fi.VolumeSerialNumber,
+ index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), // 64-bit file ID assembled from the two halves
+ }
+ return ino, nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) get(ino *inode) *watch { // lookup by (volume serial, file index); nil when not watched
+ if i := m[ino.volume]; i != nil {
+ return i[ino.index]
+ }
+ return nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) set(ino *inode, watch *watch) { // register under (volume serial, file index), creating the per-volume map lazily
+ i := m[ino.volume]
+ if i == nil {
+ i = make(indexMap)
+ m[ino.volume] = i
+ }
+ i[ino.index] = watch
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) addWatch(pathname string, flags uint64) error {
+ dir, err := getDir(pathname)
+ if err != nil {
+ return err
+ }
+ if flags&sysFSONLYDIR != 0 && pathname != dir {
+ return nil
+ }
+ ino, err := getIno(dir)
+ if err != nil {
+ return err
+ }
+ w.mu.Lock()
+ watchEntry := w.watches.get(ino)
+ w.mu.Unlock()
+ if watchEntry == nil {
+ if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
+ syscall.CloseHandle(ino.handle)
+ return os.NewSyscallError("CreateIoCompletionPort", e)
+ }
+ watchEntry = &watch{
+ ino: ino,
+ path: dir,
+ names: make(map[string]uint64),
+ }
+ w.mu.Lock()
+ w.watches.set(ino, watchEntry)
+ w.mu.Unlock()
+ flags |= provisional
+ } else {
+ syscall.CloseHandle(ino.handle)
+ }
+ if pathname == dir {
+ watchEntry.mask |= flags
+ } else {
+ watchEntry.names[filepath.Base(pathname)] |= flags
+ }
+ if err = w.startRead(watchEntry); err != nil {
+ return err
+ }
+ if pathname == dir {
+ watchEntry.mask &= ^provisional
+ } else {
+ watchEntry.names[filepath.Base(pathname)] &= ^provisional
+ }
+ return nil
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) remWatch(pathname string) error {
+ dir, err := getDir(pathname)
+ if err != nil {
+ return err
+ }
+ ino, err := getIno(dir)
+ if err != nil {
+ return err
+ }
+ w.mu.Lock()
+ watch := w.watches.get(ino)
+ w.mu.Unlock()
+ if watch == nil {
+ return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
+ }
+ if pathname == dir {
+ w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+ watch.mask = 0
+ } else {
+ name := filepath.Base(pathname)
+ w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
+ delete(watch.names, name)
+ }
+ return w.startRead(watch)
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) deleteWatch(watch *watch) {
+ for name, mask := range watch.names {
+ if mask&provisional == 0 {
+ w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
+ }
+ delete(watch.names, name)
+ }
+ if watch.mask != 0 {
+ if watch.mask&provisional == 0 {
+ w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+ }
+ watch.mask = 0
+ }
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) startRead(watch *watch) error {
+ if e := syscall.CancelIo(watch.ino.handle); e != nil {
+ w.Errors <- os.NewSyscallError("CancelIo", e)
+ w.deleteWatch(watch)
+ }
+ mask := toWindowsFlags(watch.mask)
+ for _, m := range watch.names {
+ mask |= toWindowsFlags(m)
+ }
+ if mask == 0 {
+ if e := syscall.CloseHandle(watch.ino.handle); e != nil {
+ w.Errors <- os.NewSyscallError("CloseHandle", e)
+ }
+ w.mu.Lock()
+ delete(w.watches[watch.ino.volume], watch.ino.index)
+ w.mu.Unlock()
+ return nil
+ }
+ e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
+ uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
+ if e != nil {
+ err := os.NewSyscallError("ReadDirectoryChanges", e)
+ if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
+ // Watched directory was probably removed
+ if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
+ if watch.mask&sysFSONESHOT != 0 {
+ watch.mask = 0
+ }
+ }
+ err = nil
+ }
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ return err
+ }
+ return nil
+}
+
+// readEvents reads from the I/O completion port, converts the
+// received events into Event objects and sends them via the Events channel.
+// Entry point to the I/O thread.
+func (w *Watcher) readEvents() {
+ var (
+ n, key uint32
+ ov *syscall.Overlapped
+ )
+ runtime.LockOSThread()
+
+ for {
+ e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
+ watch := (*watch)(unsafe.Pointer(ov))
+
+ if watch == nil {
+ select {
+ case ch := <-w.quit:
+ w.mu.Lock()
+ var indexes []indexMap
+ for _, index := range w.watches {
+ indexes = append(indexes, index)
+ }
+ w.mu.Unlock()
+ for _, index := range indexes {
+ for _, watch := range index {
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ }
+ }
+ var err error
+ if e := syscall.CloseHandle(w.port); e != nil {
+ err = os.NewSyscallError("CloseHandle", e)
+ }
+ close(w.Events)
+ close(w.Errors)
+ ch <- err
+ return
+ case in := <-w.input:
+ switch in.op {
+ case opAddWatch:
+ in.reply <- w.addWatch(in.path, uint64(in.flags))
+ case opRemoveWatch:
+ in.reply <- w.remWatch(in.path)
+ }
+ default:
+ }
+ continue
+ }
+
+ switch e {
+ case syscall.ERROR_MORE_DATA:
+ if watch == nil {
+ w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
+ } else {
+ // The i/o succeeded but the buffer is full.
+ // In theory we should be building up a full packet.
+ // In practice we can get away with just carrying on.
+ n = uint32(unsafe.Sizeof(watch.buf))
+ }
+ case syscall.ERROR_ACCESS_DENIED:
+ // Watched directory was probably removed
+ w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ continue
+ case syscall.ERROR_OPERATION_ABORTED:
+ // CancelIo was called on this handle
+ continue
+ default:
+ w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
+ continue
+ case nil:
+ }
+
+ var offset uint32
+ for {
+ if n == 0 {
+ w.Events <- newEvent("", sysFSQOVERFLOW)
+ w.Errors <- errors.New("short read in readEvents()")
+ break
+ }
+
+ // Point "raw" to the event in the buffer
+ raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+ buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
+ name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
+ fullname := filepath.Join(watch.path, name)
+
+ var mask uint64
+ switch raw.Action {
+ case syscall.FILE_ACTION_REMOVED:
+ mask = sysFSDELETESELF
+ case syscall.FILE_ACTION_MODIFIED:
+ mask = sysFSMODIFY
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+ watch.rename = name
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+ if watch.names[watch.rename] != 0 {
+ watch.names[name] |= watch.names[watch.rename]
+ delete(watch.names, watch.rename)
+ mask = sysFSMOVESELF
+ }
+ }
+
+ sendNameEvent := func() {
+ if w.sendEvent(fullname, watch.names[name]&mask) {
+ if watch.names[name]&sysFSONESHOT != 0 {
+ delete(watch.names, name)
+ }
+ }
+ }
+ if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
+ sendNameEvent()
+ }
+ if raw.Action == syscall.FILE_ACTION_REMOVED {
+ w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
+ delete(watch.names, name)
+ }
+ if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
+ if watch.mask&sysFSONESHOT != 0 {
+ watch.mask = 0
+ }
+ }
+ if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
+ fullname = filepath.Join(watch.path, watch.rename)
+ sendNameEvent()
+ }
+
+ // Move to the next event in the buffer
+ if raw.NextEntryOffset == 0 {
+ break
+ }
+ offset += raw.NextEntryOffset
+
+ // Error!
+ if offset >= n {
+ w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
+ break
+ }
+ }
+
+ if err := w.startRead(watch); err != nil {
+ w.Errors <- err
+ }
+ }
+}
+
+func (w *Watcher) sendEvent(name string, mask uint64) bool { // sendEvent delivers an event unless mask is empty; reports whether it was sent.
+ if mask == 0 {
+ return false
+ }
+ event := newEvent(name, uint32(mask))
+ select {
+ case ch := <-w.quit: // shutting down: put the quit request back for readEvents and drop the event
+ w.quit <- ch
+ case w.Events <- event:
+ }
+ return true
+}
+
+func toWindowsFlags(mask uint64) uint32 { // toWindowsFlags maps the internal sysFS* bitmask to ReadDirectoryChangesW notify filters.
+ var m uint32
+ if mask&sysFSACCESS != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
+ }
+ if mask&sysFSMODIFY != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
+ }
+ if mask&sysFSATTRIB != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
+ }
+ if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME // create/delete/rename all surface as name changes
+ }
+ return m
+}
+
+func toFSnotifyFlags(action uint32) uint64 { // toFSnotifyFlags maps a FILE_NOTIFY_INFORMATION action code to the internal sysFS* bitmask.
+ switch action {
+ case syscall.FILE_ACTION_ADDED:
+ return sysFSCREATE
+ case syscall.FILE_ACTION_REMOVED:
+ return sysFSDELETE
+ case syscall.FILE_ACTION_MODIFIED:
+ return sysFSMODIFY
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+ return sysFSMOVEDFROM
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+ return sysFSMOVEDTO
+ }
+ return 0 // unrecognized action: no event bits
+}
diff --git a/vendor/github.com/garyburd/redigo/LICENSE b/vendor/github.com/garyburd/redigo/LICENSE
new file mode 100644
index 0000000..67db858
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/LICENSE
@@ -0,0 +1,175 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/garyburd/redigo/internal/commandinfo.go b/vendor/github.com/garyburd/redigo/internal/commandinfo.go
new file mode 100644
index 0000000..11e5842
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/internal/commandinfo.go
@@ -0,0 +1,54 @@
+// Copyright 2014 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package internal // import "github.com/garyburd/redigo/internal"
+
+import (
+ "strings"
+)
+
+const (
+ WatchState = 1 << iota
+ MultiState
+ SubscribeState
+ MonitorState
+)
+
+type CommandInfo struct {
+ Set, Clear int
+}
+
+var commandInfos = map[string]CommandInfo{
+ "WATCH": {Set: WatchState},
+ "UNWATCH": {Clear: WatchState},
+ "MULTI": {Set: MultiState},
+ "EXEC": {Clear: WatchState | MultiState},
+ "DISCARD": {Clear: WatchState | MultiState},
+ "PSUBSCRIBE": {Set: SubscribeState},
+ "SUBSCRIBE": {Set: SubscribeState},
+ "MONITOR": {Set: MonitorState},
+}
+
+func init() { // pre-register lowercase aliases so the common spellings hit the map directly
+ for n, ci := range commandInfos {
+ commandInfos[strings.ToLower(n)] = ci
+ }
+}
+
+func LookupCommandInfo(commandName string) CommandInfo { // LookupCommandInfo returns the connection-state bits set/cleared by a Redis command; zero value for unknown commands.
+ if ci, ok := commandInfos[commandName]; ok {
+ return ci
+ }
+ return commandInfos[strings.ToUpper(commandName)] // mixed-case names fall back to the canonical uppercase entry
+}
diff --git a/vendor/github.com/garyburd/redigo/redis/conn.go b/vendor/github.com/garyburd/redigo/redis/conn.go
new file mode 100644
index 0000000..5aa0f32
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/conn.go
@@ -0,0 +1,673 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/url"
+ "regexp"
+ "strconv"
+ "sync"
+ "time"
+)
+
+var (
+ _ ConnWithTimeout = (*conn)(nil)
+)
+
+// conn is the low-level implementation of Conn
+type conn struct {
+ // Shared
+ mu sync.Mutex
+ pending int
+ err error
+ conn net.Conn
+
+ // Read
+ readTimeout time.Duration
+ br *bufio.Reader
+
+ // Write
+ writeTimeout time.Duration
+ bw *bufio.Writer
+
+ // Scratch space for formatting argument length.
+ // '*' or '$', length, "\r\n"
+ lenScratch [32]byte
+
+ // Scratch space for formatting integers and floats.
+ numScratch [40]byte
+}
+
+// DialTimeout acts like Dial but takes timeouts for establishing the
+// connection to the server, writing a command and reading a reply.
+//
+// Deprecated: Use Dial with options instead.
+func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) {
+ return Dial(network, address,
+ DialConnectTimeout(connectTimeout),
+ DialReadTimeout(readTimeout),
+ DialWriteTimeout(writeTimeout))
+}
+
+// DialOption specifies an option for dialing a Redis server.
+type DialOption struct {
+ f func(*dialOptions)
+}
+
+type dialOptions struct {
+ readTimeout time.Duration
+ writeTimeout time.Duration
+ dialer *net.Dialer
+ dial func(network, addr string) (net.Conn, error)
+ db int
+ password string
+ useTLS bool
+ skipVerify bool
+ tlsConfig *tls.Config
+}
+
+// DialReadTimeout specifies the timeout for reading a single command reply.
+func DialReadTimeout(d time.Duration) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.readTimeout = d
+ }}
+}
+
+// DialWriteTimeout specifies the timeout for writing a single command.
+func DialWriteTimeout(d time.Duration) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.writeTimeout = d
+ }}
+}
+
+// DialConnectTimeout specifies the timeout for connecting to the Redis server when
+// no DialNetDial option is specified.
+func DialConnectTimeout(d time.Duration) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.dialer.Timeout = d
+ }}
+}
+
+// DialKeepAlive specifies the keep-alive period for TCP connections to the Redis server
+// when no DialNetDial option is specified.
+// If zero, keep-alives are not enabled. If no DialKeepAlive option is specified then
+// the default of 5 minutes is used to ensure that half-closed TCP sessions are detected.
+func DialKeepAlive(d time.Duration) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.dialer.KeepAlive = d
+ }}
+}
+
+// DialNetDial specifies a custom dial function for creating TCP
+// connections, otherwise a net.Dialer customized via the other options is used.
+// DialNetDial overrides DialConnectTimeout and DialKeepAlive.
+func DialNetDial(dial func(network, addr string) (net.Conn, error)) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.dial = dial
+ }}
+}
+
+// DialDatabase specifies the database to select when dialing a connection.
+func DialDatabase(db int) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.db = db
+ }}
+}
+
+// DialPassword specifies the password to use when connecting to
+// the Redis server.
+func DialPassword(password string) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.password = password
+ }}
+}
+
+// DialTLSConfig specifies the config to use when a TLS connection is dialed.
+// Has no effect when not dialing a TLS connection.
+func DialTLSConfig(c *tls.Config) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.tlsConfig = c
+ }}
+}
+
+// DialTLSSkipVerify disables server name verification when connecting over
+// TLS. Has no effect when not dialing a TLS connection.
+func DialTLSSkipVerify(skip bool) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.skipVerify = skip
+ }}
+}
+
+// DialUseTLS specifies whether TLS should be used when connecting to the
+// server. This option is ignored by DialURL.
+func DialUseTLS(useTLS bool) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.useTLS = useTLS
+ }}
+}
+
+// Dial connects to the Redis server at the given network and
+// address using the specified options.
+func Dial(network, address string, options ...DialOption) (Conn, error) {
+ do := dialOptions{
+ dialer: &net.Dialer{
+ KeepAlive: time.Minute * 5,
+ },
+ }
+ for _, option := range options {
+ option.f(&do)
+ }
+ if do.dial == nil {
+ do.dial = do.dialer.Dial
+ }
+
+ netConn, err := do.dial(network, address)
+ if err != nil {
+ return nil, err
+ }
+
+ if do.useTLS {
+ var tlsConfig *tls.Config
+ if do.tlsConfig == nil {
+ tlsConfig = &tls.Config{InsecureSkipVerify: do.skipVerify}
+ } else {
+ tlsConfig = cloneTLSConfig(do.tlsConfig)
+ }
+ if tlsConfig.ServerName == "" {
+ host, _, err := net.SplitHostPort(address)
+ if err != nil {
+ netConn.Close()
+ return nil, err
+ }
+ tlsConfig.ServerName = host
+ }
+
+ tlsConn := tls.Client(netConn, tlsConfig)
+ if err := tlsConn.Handshake(); err != nil {
+ netConn.Close()
+ return nil, err
+ }
+ netConn = tlsConn
+ }
+
+ c := &conn{
+ conn: netConn,
+ bw: bufio.NewWriter(netConn),
+ br: bufio.NewReader(netConn),
+ readTimeout: do.readTimeout,
+ writeTimeout: do.writeTimeout,
+ }
+
+ if do.password != "" {
+ if _, err := c.Do("AUTH", do.password); err != nil {
+ netConn.Close()
+ return nil, err
+ }
+ }
+
+ if do.db != 0 {
+ if _, err := c.Do("SELECT", do.db); err != nil {
+ netConn.Close()
+ return nil, err
+ }
+ }
+
+ return c, nil
+}
+
+var pathDBRegexp = regexp.MustCompile(`/(\d*)\z`)
+
+// DialURL connects to a Redis server at the given URL using the Redis
+// URI scheme. URLs should follow the draft IANA specification for the
+// scheme (https://www.iana.org/assignments/uri-schemes/prov/redis).
+func DialURL(rawurl string, options ...DialOption) (Conn, error) {
+ u, err := url.Parse(rawurl)
+ if err != nil {
+ return nil, err
+ }
+
+ if u.Scheme != "redis" && u.Scheme != "rediss" {
+ return nil, fmt.Errorf("invalid redis URL scheme: %s", u.Scheme)
+ }
+
+ // As per the IANA draft spec, the host defaults to localhost and
+ // the port defaults to 6379.
+ host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ // assume port is missing
+ host = u.Host
+ port = "6379"
+ }
+ if host == "" {
+ host = "localhost"
+ }
+ address := net.JoinHostPort(host, port)
+
+ if u.User != nil {
+ password, isSet := u.User.Password()
+ if isSet {
+ options = append(options, DialPassword(password))
+ }
+ }
+
+ match := pathDBRegexp.FindStringSubmatch(u.Path)
+ if len(match) == 2 {
+ db := 0
+ if len(match[1]) > 0 {
+ db, err = strconv.Atoi(match[1])
+ if err != nil {
+ return nil, fmt.Errorf("invalid database: %s", u.Path[1:])
+ }
+ }
+ if db != 0 {
+ options = append(options, DialDatabase(db))
+ }
+ } else if u.Path != "" {
+ return nil, fmt.Errorf("invalid database: %s", u.Path[1:])
+ }
+
+ options = append(options, DialUseTLS(u.Scheme == "rediss"))
+
+ return Dial("tcp", address, options...)
+}
+
+// NewConn returns a new Redigo connection for the given net connection.
+func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn {
+ return &conn{
+ conn: netConn,
+ bw: bufio.NewWriter(netConn),
+ br: bufio.NewReader(netConn),
+ readTimeout: readTimeout,
+ writeTimeout: writeTimeout,
+ }
+}
+
+func (c *conn) Close() error {
+ c.mu.Lock()
+ err := c.err
+ if c.err == nil {
+ c.err = errors.New("redigo: closed")
+ err = c.conn.Close()
+ }
+ c.mu.Unlock()
+ return err
+}
+
+func (c *conn) fatal(err error) error {
+ c.mu.Lock()
+ if c.err == nil {
+ c.err = err
+ // Close connection to force errors on subsequent calls and to unblock
+ // other reader or writer.
+ c.conn.Close()
+ }
+ c.mu.Unlock()
+ return err
+}
+
+func (c *conn) Err() error {
+ c.mu.Lock()
+ err := c.err
+ c.mu.Unlock()
+ return err
+}
+
+func (c *conn) writeLen(prefix byte, n int) error {
+ c.lenScratch[len(c.lenScratch)-1] = '\n'
+ c.lenScratch[len(c.lenScratch)-2] = '\r'
+ i := len(c.lenScratch) - 3
+ for {
+ c.lenScratch[i] = byte('0' + n%10)
+ i -= 1
+ n = n / 10
+ if n == 0 {
+ break
+ }
+ }
+ c.lenScratch[i] = prefix
+ _, err := c.bw.Write(c.lenScratch[i:])
+ return err
+}
+
+func (c *conn) writeString(s string) error {
+ c.writeLen('$', len(s))
+ c.bw.WriteString(s)
+ _, err := c.bw.WriteString("\r\n")
+ return err
+}
+
+func (c *conn) writeBytes(p []byte) error {
+ c.writeLen('$', len(p))
+ c.bw.Write(p)
+ _, err := c.bw.WriteString("\r\n")
+ return err
+}
+
+func (c *conn) writeInt64(n int64) error {
+ return c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10))
+}
+
+func (c *conn) writeFloat64(n float64) error {
+ return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64))
+}
+
+func (c *conn) writeCommand(cmd string, args []interface{}) error {
+ c.writeLen('*', 1+len(args))
+ if err := c.writeString(cmd); err != nil {
+ return err
+ }
+ for _, arg := range args {
+ if err := c.writeArg(arg, true); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c *conn) writeArg(arg interface{}, argumentTypeOK bool) (err error) {
+ switch arg := arg.(type) {
+ case string:
+ return c.writeString(arg)
+ case []byte:
+ return c.writeBytes(arg)
+ case int:
+ return c.writeInt64(int64(arg))
+ case int64:
+ return c.writeInt64(arg)
+ case float64:
+ return c.writeFloat64(arg)
+ case bool:
+ if arg {
+ return c.writeString("1")
+ } else {
+ return c.writeString("0")
+ }
+ case nil:
+ return c.writeString("")
+ case Argument:
+ if argumentTypeOK {
+ return c.writeArg(arg.RedisArg(), false)
+ }
+ // See comment in default clause below.
+ var buf bytes.Buffer
+ fmt.Fprint(&buf, arg)
+ return c.writeBytes(buf.Bytes())
+ default:
+ // This default clause is intended to handle builtin numeric types.
+ // The function should return an error for other types, but this is not
+ // done for compatibility with previous versions of the package.
+ var buf bytes.Buffer
+ fmt.Fprint(&buf, arg)
+ return c.writeBytes(buf.Bytes())
+ }
+}
+
+type protocolError string
+
+func (pe protocolError) Error() string {
+ return fmt.Sprintf("redigo: %s (possible server error or unsupported concurrent read by application)", string(pe))
+}
+
+func (c *conn) readLine() ([]byte, error) {
+ p, err := c.br.ReadSlice('\n')
+ if err == bufio.ErrBufferFull {
+ return nil, protocolError("long response line")
+ }
+ if err != nil {
+ return nil, err
+ }
+ i := len(p) - 2
+ if i < 0 || p[i] != '\r' {
+ return nil, protocolError("bad response line terminator")
+ }
+ return p[:i], nil
+}
+
+// parseLen parses bulk string and array lengths.
+func parseLen(p []byte) (int, error) {
+ if len(p) == 0 {
+ return -1, protocolError("malformed length")
+ }
+
+ if p[0] == '-' && len(p) == 2 && p[1] == '1' {
+ // handle $-1 (null bulk string) and *-1 (null array) replies.
+ return -1, nil
+ }
+
+ var n int
+ for _, b := range p {
+ n *= 10
+ if b < '0' || b > '9' {
+ return -1, protocolError("illegal bytes in length")
+ }
+ n += int(b - '0')
+ }
+
+ return n, nil
+}
+
+// parseInt parses an integer reply.
+func parseInt(p []byte) (interface{}, error) {
+ if len(p) == 0 {
+ return 0, protocolError("malformed integer")
+ }
+
+ var negate bool
+ if p[0] == '-' {
+ negate = true
+ p = p[1:]
+ if len(p) == 0 {
+ return 0, protocolError("malformed integer")
+ }
+ }
+
+ var n int64
+ for _, b := range p {
+ n *= 10
+ if b < '0' || b > '9' {
+ return 0, protocolError("illegal bytes in length")
+ }
+ n += int64(b - '0')
+ }
+
+ if negate {
+ n = -n
+ }
+ return n, nil
+}
+
+var (
+ okReply interface{} = "OK"
+ pongReply interface{} = "PONG"
+)
+
+func (c *conn) readReply() (interface{}, error) {
+ line, err := c.readLine()
+ if err != nil {
+ return nil, err
+ }
+ if len(line) == 0 {
+ return nil, protocolError("short response line")
+ }
+ switch line[0] {
+ case '+':
+ switch {
+ case len(line) == 3 && line[1] == 'O' && line[2] == 'K':
+ // Avoid allocation for frequent "+OK" response.
+ return okReply, nil
+ case len(line) == 5 && line[1] == 'P' && line[2] == 'O' && line[3] == 'N' && line[4] == 'G':
+ // Avoid allocation in PING command benchmarks :)
+ return pongReply, nil
+ default:
+ return string(line[1:]), nil
+ }
+ case '-':
+ return Error(string(line[1:])), nil
+ case ':':
+ return parseInt(line[1:])
+ case '$':
+ n, err := parseLen(line[1:])
+ if n < 0 || err != nil {
+ return nil, err
+ }
+ p := make([]byte, n)
+ _, err = io.ReadFull(c.br, p)
+ if err != nil {
+ return nil, err
+ }
+ if line, err := c.readLine(); err != nil {
+ return nil, err
+ } else if len(line) != 0 {
+ return nil, protocolError("bad bulk string format")
+ }
+ return p, nil
+ case '*':
+ n, err := parseLen(line[1:])
+ if n < 0 || err != nil {
+ return nil, err
+ }
+ r := make([]interface{}, n)
+ for i := range r {
+ r[i], err = c.readReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return r, nil
+ }
+ return nil, protocolError("unexpected response line")
+}
+
+func (c *conn) Send(cmd string, args ...interface{}) error {
+ c.mu.Lock()
+ c.pending += 1
+ c.mu.Unlock()
+ if c.writeTimeout != 0 {
+ c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
+ }
+ if err := c.writeCommand(cmd, args); err != nil {
+ return c.fatal(err)
+ }
+ return nil
+}
+
+func (c *conn) Flush() error {
+ if c.writeTimeout != 0 {
+ c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
+ }
+ if err := c.bw.Flush(); err != nil {
+ return c.fatal(err)
+ }
+ return nil
+}
+
+func (c *conn) Receive() (interface{}, error) {
+ return c.ReceiveWithTimeout(c.readTimeout)
+}
+
+func (c *conn) ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) {
+ var deadline time.Time
+ if timeout != 0 {
+ deadline = time.Now().Add(timeout)
+ }
+ c.conn.SetReadDeadline(deadline)
+
+ if reply, err = c.readReply(); err != nil {
+ return nil, c.fatal(err)
+ }
+ // When using pub/sub, the number of receives can be greater than the
+ // number of sends. To enable normal use of the connection after
+ // unsubscribing from all channels, we do not decrement pending to a
+ // negative value.
+ //
+ // The pending field is decremented after the reply is read to handle the
+ // case where Receive is called before Send.
+ c.mu.Lock()
+ if c.pending > 0 {
+ c.pending -= 1
+ }
+ c.mu.Unlock()
+ if err, ok := reply.(Error); ok {
+ return nil, err
+ }
+ return
+}
+
+func (c *conn) Do(cmd string, args ...interface{}) (interface{}, error) {
+ return c.DoWithTimeout(c.readTimeout, cmd, args...)
+}
+
+func (c *conn) DoWithTimeout(readTimeout time.Duration, cmd string, args ...interface{}) (interface{}, error) {
+ c.mu.Lock()
+ pending := c.pending
+ c.pending = 0
+ c.mu.Unlock()
+
+ if cmd == "" && pending == 0 {
+ return nil, nil
+ }
+
+ if c.writeTimeout != 0 {
+ c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
+ }
+
+ if cmd != "" {
+ if err := c.writeCommand(cmd, args); err != nil {
+ return nil, c.fatal(err)
+ }
+ }
+
+ if err := c.bw.Flush(); err != nil {
+ return nil, c.fatal(err)
+ }
+
+ var deadline time.Time
+ if readTimeout != 0 {
+ deadline = time.Now().Add(readTimeout)
+ }
+ c.conn.SetReadDeadline(deadline)
+
+ if cmd == "" {
+ reply := make([]interface{}, pending)
+ for i := range reply {
+ r, e := c.readReply()
+ if e != nil {
+ return nil, c.fatal(e)
+ }
+ reply[i] = r
+ }
+ return reply, nil
+ }
+
+ var err error
+ var reply interface{}
+ for i := 0; i <= pending; i++ {
+ var e error
+ if reply, e = c.readReply(); e != nil {
+ return nil, c.fatal(e)
+ }
+ if e, ok := reply.(Error); ok && err == nil {
+ err = e
+ }
+ }
+ return reply, err
+}
diff --git a/vendor/github.com/garyburd/redigo/redis/doc.go b/vendor/github.com/garyburd/redigo/redis/doc.go
new file mode 100644
index 0000000..1d19c16
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/doc.go
@@ -0,0 +1,177 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package redis is a client for the Redis database.
+//
+// The Redigo FAQ (https://github.com/garyburd/redigo/wiki/FAQ) contains more
+// documentation about this package.
+//
+// Connections
+//
+// The Conn interface is the primary interface for working with Redis.
+// Applications create connections by calling the Dial, DialWithTimeout or
+// NewConn functions. In the future, functions will be added for creating
+// sharded and other types of connections.
+//
+// The application must call the connection Close method when the application
+// is done with the connection.
+//
+// Executing Commands
+//
+// The Conn interface has a generic method for executing Redis commands:
+//
+// Do(commandName string, args ...interface{}) (reply interface{}, err error)
+//
+// The Redis command reference (http://redis.io/commands) lists the available
+// commands. An example of using the Redis APPEND command is:
+//
+// n, err := conn.Do("APPEND", "key", "value")
+//
+// The Do method converts command arguments to bulk strings for transmission
+// to the server as follows:
+//
+// Go Type Conversion
+// []byte Sent as is
+// string Sent as is
+// int, int64 strconv.FormatInt(v)
+// float64 strconv.FormatFloat(v, 'g', -1, 64)
+// bool true -> "1", false -> "0"
+// nil ""
+// all other types fmt.Fprint(w, v)
+//
+// Redis command reply types are represented using the following Go types:
+//
+// Redis type Go type
+// error redis.Error
+// integer int64
+// simple string string
+// bulk string []byte or nil if value not present.
+// array []interface{} or nil if value not present.
+//
+// Use type assertions or the reply helper functions to convert from
+// interface{} to the specific Go type for the command result.
+//
+// Pipelining
+//
+// Connections support pipelining using the Send, Flush and Receive methods.
+//
+// Send(commandName string, args ...interface{}) error
+// Flush() error
+// Receive() (reply interface{}, err error)
+//
+// Send writes the command to the connection's output buffer. Flush flushes the
+// connection's output buffer to the server. Receive reads a single reply from
+// the server. The following example shows a simple pipeline.
+//
+// c.Send("SET", "foo", "bar")
+// c.Send("GET", "foo")
+// c.Flush()
+// c.Receive() // reply from SET
+// v, err = c.Receive() // reply from GET
+//
+// The Do method combines the functionality of the Send, Flush and Receive
+// methods. The Do method starts by writing the command and flushing the output
+// buffer. Next, the Do method receives all pending replies including the reply
+// for the command just sent by Do. If any of the received replies is an error,
+// then Do returns the error. If there are no errors, then Do returns the last
+// reply. If the command argument to the Do method is "", then the Do method
+// will flush the output buffer and receive pending replies without sending a
+// command.
+//
+// Use the Send and Do methods to implement pipelined transactions.
+//
+// c.Send("MULTI")
+// c.Send("INCR", "foo")
+// c.Send("INCR", "bar")
+// r, err := c.Do("EXEC")
+// fmt.Println(r) // prints [1, 1]
+//
+// Concurrency
+//
+// Connections support one concurrent caller to the Receive method and one
+// concurrent caller to the Send and Flush methods. No other concurrency is
+// supported including concurrent calls to the Do method.
+//
+// For full concurrent access to Redis, use the thread-safe Pool to get, use
+// and release a connection from within a goroutine. Connections returned from
+// a Pool have the concurrency restrictions described in the previous
+// paragraph.
+//
+// Publish and Subscribe
+//
+// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers.
+//
+// c.Send("SUBSCRIBE", "example")
+// c.Flush()
+// for {
+// reply, err := c.Receive()
+// if err != nil {
+// return err
+// }
+// // process pushed message
+// }
+//
+// The PubSubConn type wraps a Conn with convenience methods for implementing
+// subscribers. The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods
+// send and flush a subscription management command. The receive method
+// converts a pushed message to convenient types for use in a type switch.
+//
+// psc := redis.PubSubConn{Conn: c}
+// psc.Subscribe("example")
+// for {
+// switch v := psc.Receive().(type) {
+// case redis.Message:
+// fmt.Printf("%s: message: %s\n", v.Channel, v.Data)
+// case redis.Subscription:
+// fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count)
+// case error:
+// return v
+// }
+// }
+//
+// Reply Helpers
+//
+// The Bool, Int, Bytes, String, Strings and Values functions convert a reply
+// to a value of a specific type. To allow convenient wrapping of calls to the
+// connection Do and Receive methods, the functions take a second argument of
+// type error. If the error is non-nil, then the helper function returns the
+// error. If the error is nil, the function converts the reply to the specified
+// type:
+//
+// exists, err := redis.Bool(c.Do("EXISTS", "foo"))
+// if err != nil {
+// // handle error return from c.Do or type conversion error.
+// }
+//
+// The Scan function converts elements of an array reply to Go types:
+//
+// var value1 int
+// var value2 string
+// reply, err := redis.Values(c.Do("MGET", "key1", "key2"))
+// if err != nil {
+// // handle error
+// }
+// if _, err := redis.Scan(reply, &value1, &value2); err != nil {
+// // handle error
+// }
+//
+// Errors
+//
+// Connection methods return error replies from the server as type redis.Error.
+//
+// Call the connection Err() method to determine if the connection encountered
+// non-recoverable error such as a network error or protocol parsing error. If
+// Err() returns a non-nil value, then the connection is not usable and should
+// be closed.
+package redis // import "github.com/garyburd/redigo/redis"
diff --git a/vendor/github.com/garyburd/redigo/redis/go16.go b/vendor/github.com/garyburd/redigo/redis/go16.go
new file mode 100644
index 0000000..f6b1a7c
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/go16.go
@@ -0,0 +1,27 @@
+// +build !go1.7
+
+package redis
+
+import "crypto/tls"
+
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ return &tls.Config{
+ Rand: cfg.Rand,
+ Time: cfg.Time,
+ Certificates: cfg.Certificates,
+ NameToCertificate: cfg.NameToCertificate,
+ GetCertificate: cfg.GetCertificate,
+ RootCAs: cfg.RootCAs,
+ NextProtos: cfg.NextProtos,
+ ServerName: cfg.ServerName,
+ ClientAuth: cfg.ClientAuth,
+ ClientCAs: cfg.ClientCAs,
+ InsecureSkipVerify: cfg.InsecureSkipVerify,
+ CipherSuites: cfg.CipherSuites,
+ PreferServerCipherSuites: cfg.PreferServerCipherSuites,
+ ClientSessionCache: cfg.ClientSessionCache,
+ MinVersion: cfg.MinVersion,
+ MaxVersion: cfg.MaxVersion,
+ CurvePreferences: cfg.CurvePreferences,
+ }
+}
diff --git a/vendor/github.com/garyburd/redigo/redis/go17.go b/vendor/github.com/garyburd/redigo/redis/go17.go
new file mode 100644
index 0000000..5f36379
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/go17.go
@@ -0,0 +1,29 @@
+// +build go1.7,!go1.8
+
+package redis
+
+import "crypto/tls"
+
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ return &tls.Config{
+ Rand: cfg.Rand,
+ Time: cfg.Time,
+ Certificates: cfg.Certificates,
+ NameToCertificate: cfg.NameToCertificate,
+ GetCertificate: cfg.GetCertificate,
+ RootCAs: cfg.RootCAs,
+ NextProtos: cfg.NextProtos,
+ ServerName: cfg.ServerName,
+ ClientAuth: cfg.ClientAuth,
+ ClientCAs: cfg.ClientCAs,
+ InsecureSkipVerify: cfg.InsecureSkipVerify,
+ CipherSuites: cfg.CipherSuites,
+ PreferServerCipherSuites: cfg.PreferServerCipherSuites,
+ ClientSessionCache: cfg.ClientSessionCache,
+ MinVersion: cfg.MinVersion,
+ MaxVersion: cfg.MaxVersion,
+ CurvePreferences: cfg.CurvePreferences,
+ DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled,
+ Renegotiation: cfg.Renegotiation,
+ }
+}
diff --git a/vendor/github.com/garyburd/redigo/redis/go18.go b/vendor/github.com/garyburd/redigo/redis/go18.go
new file mode 100644
index 0000000..558363b
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/go18.go
@@ -0,0 +1,9 @@
+// +build go1.8
+
+package redis
+
+import "crypto/tls"
+
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ return cfg.Clone()
+}
diff --git a/vendor/github.com/garyburd/redigo/redis/log.go b/vendor/github.com/garyburd/redigo/redis/log.go
new file mode 100644
index 0000000..b299661
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/log.go
@@ -0,0 +1,134 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "time"
+)
+
+var (
+ _ ConnWithTimeout = (*loggingConn)(nil)
+)
+
+// NewLoggingConn returns a logging wrapper around a connection.
+func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn {
+ if prefix != "" {
+ prefix = prefix + "."
+ }
+ return &loggingConn{conn, logger, prefix}
+}
+
+type loggingConn struct {
+ Conn
+ logger *log.Logger
+ prefix string
+}
+
+func (c *loggingConn) Close() error {
+ err := c.Conn.Close()
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err)
+ c.logger.Output(2, buf.String())
+ return err
+}
+
+func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) {
+ const chop = 32
+ switch v := v.(type) {
+ case []byte:
+ if len(v) > chop {
+ fmt.Fprintf(buf, "%q...", v[:chop])
+ } else {
+ fmt.Fprintf(buf, "%q", v)
+ }
+ case string:
+ if len(v) > chop {
+ fmt.Fprintf(buf, "%q...", v[:chop])
+ } else {
+ fmt.Fprintf(buf, "%q", v)
+ }
+ case []interface{}:
+ if len(v) == 0 {
+ buf.WriteString("[]")
+ } else {
+ sep := "["
+ fin := "]"
+ if len(v) > chop {
+ v = v[:chop]
+ fin = "...]"
+ }
+ for _, vv := range v {
+ buf.WriteString(sep)
+ c.printValue(buf, vv)
+ sep = ", "
+ }
+ buf.WriteString(fin)
+ }
+ default:
+ fmt.Fprint(buf, v)
+ }
+}
+
+func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) {
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "%s%s(", c.prefix, method)
+ if method != "Receive" {
+ buf.WriteString(commandName)
+ for _, arg := range args {
+ buf.WriteString(", ")
+ c.printValue(&buf, arg)
+ }
+ }
+ buf.WriteString(") -> (")
+ if method != "Send" {
+ c.printValue(&buf, reply)
+ buf.WriteString(", ")
+ }
+ fmt.Fprintf(&buf, "%v)", err)
+ c.logger.Output(3, buf.String())
+}
+
+func (c *loggingConn) Do(commandName string, args ...interface{}) (interface{}, error) {
+ reply, err := c.Conn.Do(commandName, args...)
+ c.print("Do", commandName, args, reply, err)
+ return reply, err
+}
+
+func (c *loggingConn) DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (interface{}, error) {
+ reply, err := DoWithTimeout(c.Conn, timeout, commandName, args...)
+ c.print("DoWithTimeout", commandName, args, reply, err)
+ return reply, err
+}
+
+func (c *loggingConn) Send(commandName string, args ...interface{}) error {
+ err := c.Conn.Send(commandName, args...)
+ c.print("Send", commandName, args, nil, err)
+ return err
+}
+
+func (c *loggingConn) Receive() (interface{}, error) {
+ reply, err := c.Conn.Receive()
+ c.print("Receive", "", nil, reply, err)
+ return reply, err
+}
+
+func (c *loggingConn) ReceiveWithTimeout(timeout time.Duration) (interface{}, error) {
+ reply, err := ReceiveWithTimeout(c.Conn, timeout)
+ c.print("ReceiveWithTimeout", "", nil, reply, err)
+ return reply, err
+}
diff --git a/vendor/github.com/garyburd/redigo/redis/pool.go b/vendor/github.com/garyburd/redigo/redis/pool.go
new file mode 100644
index 0000000..3e6f426
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/pool.go
@@ -0,0 +1,527 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/sha1"
+ "errors"
+ "io"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/garyburd/redigo/internal"
+)
+
+var (
+ _ ConnWithTimeout = (*pooledConnection)(nil)
+ _ ConnWithTimeout = (*errorConnection)(nil)
+)
+
+var nowFunc = time.Now // for testing
+
+// ErrPoolExhausted is returned from a pool connection method (Do, Send,
+// Receive, Flush, Err) when the maximum number of database connections in the
+// pool has been reached.
+var ErrPoolExhausted = errors.New("redigo: connection pool exhausted")
+
+var (
+ errPoolClosed = errors.New("redigo: connection pool closed")
+ errConnClosed = errors.New("redigo: connection closed")
+)
+
+// Pool maintains a pool of connections. The application calls the Get method
+// to get a connection from the pool and the connection's Close method to
+// return the connection's resources to the pool.
+//
+// The following example shows how to use a pool in a web application. The
+// application creates a pool at application startup and makes it available to
+// request handlers using a package level variable. The pool configuration used
+// here is an example, not a recommendation.
+//
+// func newPool(addr string) *redis.Pool {
+// return &redis.Pool{
+// MaxIdle: 3,
+// IdleTimeout: 240 * time.Second,
+// Dial: func () (redis.Conn, error) { return redis.Dial("tcp", addr) },
+// }
+// }
+//
+// var (
+// pool *redis.Pool
+// redisServer = flag.String("redisServer", ":6379", "")
+// )
+//
+// func main() {
+// flag.Parse()
+// pool = newPool(*redisServer)
+// ...
+// }
+//
+// A request handler gets a connection from the pool and closes the connection
+// when the handler is done:
+//
+// func serveHome(w http.ResponseWriter, r *http.Request) {
+// conn := pool.Get()
+// defer conn.Close()
+// ...
+// }
+//
+// Use the Dial function to authenticate connections with the AUTH command or
+// select a database with the SELECT command:
+//
+// pool := &redis.Pool{
+// // Other pool configuration not shown in this example.
+// Dial: func () (redis.Conn, error) {
+// c, err := redis.Dial("tcp", server)
+// if err != nil {
+// return nil, err
+// }
+// if _, err := c.Do("AUTH", password); err != nil {
+// c.Close()
+// return nil, err
+// }
+// if _, err := c.Do("SELECT", db); err != nil {
+// c.Close()
+// return nil, err
+// }
+// return c, nil
+// },
+// }
+//
+// Use the TestOnBorrow function to check the health of an idle connection
+// before the connection is returned to the application. This example PINGs
+// connections that have been idle more than a minute:
+//
+// pool := &redis.Pool{
+// // Other pool configuration not shown in this example.
+// TestOnBorrow: func(c redis.Conn, t time.Time) error {
+// if time.Since(t) < time.Minute {
+// return nil
+// }
+// _, err := c.Do("PING")
+// return err
+// },
+// }
+//
+type Pool struct {
+ // Dial is an application supplied function for creating and configuring a
+ // connection.
+ //
+ // The connection returned from Dial must not be in a special state
+ // (subscribed to pubsub channel, transaction started, ...).
+ Dial func() (Conn, error)
+
+ // TestOnBorrow is an optional application supplied function for checking
+ // the health of an idle connection before the connection is used again by
+ // the application. Argument t is the time that the connection was returned
+ // to the pool. If the function returns an error, then the connection is
+ // closed.
+ TestOnBorrow func(c Conn, t time.Time) error
+
+ // Maximum number of idle connections in the pool.
+ MaxIdle int
+
+ // Maximum number of connections allocated by the pool at a given time.
+ // When zero, there is no limit on the number of connections in the pool.
+ MaxActive int
+
+ // Close connections after remaining idle for this duration. If the value
+ // is zero, then idle connections are not closed. Applications should set
+ // the timeout to a value less than the server's timeout.
+ IdleTimeout time.Duration
+
+ // If Wait is true and the pool is at the MaxActive limit, then Get() waits
+ // for a connection to be returned to the pool before returning.
+ Wait bool
+
+ chInitialized uint32 // set to 1 when field ch is initialized
+
+ mu sync.Mutex // mu protects the following fields
+ closed bool // set to true when the pool is closed.
+ active int // the number of open connections in the pool
+ ch chan struct{} // limits open connections when p.Wait is true
+ idle idleList // idle connections
+}
+
+// NewPool creates a new pool.
+//
+// Deprecated: Initialize the Pool directly as shown in the example.
+func NewPool(newFn func() (Conn, error), maxIdle int) *Pool {
+ return &Pool{Dial: newFn, MaxIdle: maxIdle}
+}
+
+// Get gets a connection. The application must close the returned connection.
+// This method always returns a valid connection so that applications can defer
+// error handling to the first use of the connection. If there is an error
+// getting an underlying connection, then the connection Err, Do, Send, Flush
+// and Receive methods return that error.
+func (p *Pool) Get() Conn {
+ c, err := p.get(nil)
+ if err != nil {
+ return errorConnection{err}
+ }
+ return &pooledConnection{p: p, c: c}
+}
+
+// PoolStats contains pool statistics.
+type PoolStats struct {
+ // ActiveCount is the number of connections in the pool. The count includes
+ // idle connections and connections in use.
+ ActiveCount int
+ // IdleCount is the number of idle connections in the pool.
+ IdleCount int
+}
+
+// Stats returns pool's statistics.
+func (p *Pool) Stats() PoolStats {
+ p.mu.Lock()
+ stats := PoolStats{
+ ActiveCount: p.active,
+ IdleCount: p.idle.count,
+ }
+ p.mu.Unlock()
+
+ return stats
+}
+
+// ActiveCount returns the number of connections in the pool. The count
+// includes idle connections and connections in use.
+func (p *Pool) ActiveCount() int {
+ p.mu.Lock()
+ active := p.active
+ p.mu.Unlock()
+ return active
+}
+
+// IdleCount returns the number of idle connections in the pool.
+func (p *Pool) IdleCount() int {
+ p.mu.Lock()
+ idle := p.idle.count
+ p.mu.Unlock()
+ return idle
+}
+
+// Close releases the resources used by the pool.
+func (p *Pool) Close() error {
+ p.mu.Lock()
+ if p.closed {
+ p.mu.Unlock()
+ return nil
+ }
+ p.closed = true
+ p.active -= p.idle.count
+ ic := p.idle.front
+ p.idle.count = 0
+ p.idle.front, p.idle.back = nil, nil
+ if p.ch != nil {
+ close(p.ch)
+ }
+ p.mu.Unlock()
+ for ; ic != nil; ic = ic.next {
+ ic.c.Close()
+ }
+ return nil
+}
+
+func (p *Pool) lazyInit() {
+ // Fast path.
+ if atomic.LoadUint32(&p.chInitialized) == 1 {
+ return
+ }
+ // Slow path.
+ p.mu.Lock()
+ if p.chInitialized == 0 {
+ p.ch = make(chan struct{}, p.MaxActive)
+ if p.closed {
+ close(p.ch)
+ } else {
+ for i := 0; i < p.MaxActive; i++ {
+ p.ch <- struct{}{}
+ }
+ }
+ atomic.StoreUint32(&p.chInitialized, 1)
+ }
+ p.mu.Unlock()
+}
+
+// get prunes stale connections and returns a connection from the idle list or
+// creates a new connection.
+func (p *Pool) get(ctx interface {
+ Done() <-chan struct{}
+ Err() error
+}) (Conn, error) {
+
+ // Handle limit for p.Wait == true.
+ if p.Wait && p.MaxActive > 0 {
+ p.lazyInit()
+ if ctx == nil {
+ <-p.ch
+ } else {
+ select {
+ case <-p.ch:
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+ }
+ }
+
+ p.mu.Lock()
+
+ // Prune stale connections at the back of the idle list.
+ if p.IdleTimeout > 0 {
+ n := p.idle.count
+ for i := 0; i < n && p.idle.back != nil && p.idle.back.t.Add(p.IdleTimeout).Before(nowFunc()); i++ {
+ c := p.idle.back.c
+ p.idle.popBack()
+ p.mu.Unlock()
+ c.Close()
+ p.mu.Lock()
+ p.active--
+ }
+ }
+
+ // Get idle connection from the front of idle list.
+ for p.idle.front != nil {
+ ic := p.idle.front
+ p.idle.popFront()
+ p.mu.Unlock()
+ if p.TestOnBorrow == nil || p.TestOnBorrow(ic.c, ic.t) == nil {
+ return ic.c, nil
+ }
+ ic.c.Close()
+ p.mu.Lock()
+ p.active--
+ }
+
+ // Check for pool closed before dialing a new connection.
+ if p.closed {
+ p.mu.Unlock()
+ return nil, errors.New("redigo: get on closed pool")
+ }
+
+ // Handle limit for p.Wait == false.
+ if !p.Wait && p.MaxActive > 0 && p.active >= p.MaxActive {
+ p.mu.Unlock()
+ return nil, ErrPoolExhausted
+ }
+
+ p.active++
+ p.mu.Unlock()
+ c, err := p.Dial()
+ if err != nil {
+ c = nil
+ p.mu.Lock()
+ p.active--
+ if p.ch != nil && !p.closed {
+ p.ch <- struct{}{}
+ }
+ p.mu.Unlock()
+ }
+ return c, err
+}
+
+func (p *Pool) put(c Conn, forceClose bool) error {
+ p.mu.Lock()
+ if !p.closed && !forceClose {
+ p.idle.pushFront(&idleConn{t: nowFunc(), c: c})
+ if p.idle.count > p.MaxIdle {
+ c = p.idle.back.c
+ p.idle.popBack()
+ } else {
+ c = nil
+ }
+ }
+
+ if c != nil {
+ p.mu.Unlock()
+ c.Close()
+ p.mu.Lock()
+ p.active--
+ }
+
+ if p.ch != nil && !p.closed {
+ p.ch <- struct{}{}
+ }
+ p.mu.Unlock()
+ return nil
+}
+
+type pooledConnection struct {
+ p *Pool
+ c Conn
+ state int
+}
+
+var (
+ sentinel []byte
+ sentinelOnce sync.Once
+)
+
+func initSentinel() {
+ p := make([]byte, 64)
+ if _, err := rand.Read(p); err == nil {
+ sentinel = p
+ } else {
+ h := sha1.New()
+ io.WriteString(h, "Oops, rand failed. Use time instead.")
+ io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10))
+ sentinel = h.Sum(nil)
+ }
+}
+
+func (pc *pooledConnection) Close() error {
+ c := pc.c
+ if _, ok := c.(errorConnection); ok {
+ return nil
+ }
+ pc.c = errorConnection{errConnClosed}
+
+ if pc.state&internal.MultiState != 0 {
+ c.Send("DISCARD")
+ pc.state &^= (internal.MultiState | internal.WatchState)
+ } else if pc.state&internal.WatchState != 0 {
+ c.Send("UNWATCH")
+ pc.state &^= internal.WatchState
+ }
+ if pc.state&internal.SubscribeState != 0 {
+ c.Send("UNSUBSCRIBE")
+ c.Send("PUNSUBSCRIBE")
+ // To detect the end of the message stream, ask the server to echo
+ // a sentinel value and read until we see that value.
+ sentinelOnce.Do(initSentinel)
+ c.Send("ECHO", sentinel)
+ c.Flush()
+ for {
+ p, err := c.Receive()
+ if err != nil {
+ break
+ }
+ if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) {
+ pc.state &^= internal.SubscribeState
+ break
+ }
+ }
+ }
+ c.Do("")
+ pc.p.put(c, pc.state != 0 || c.Err() != nil)
+ return nil
+}
+
+func (pc *pooledConnection) Err() error {
+ return pc.c.Err()
+}
+
+func (pc *pooledConnection) Do(commandName string, args ...interface{}) (reply interface{}, err error) {
+ ci := internal.LookupCommandInfo(commandName)
+ pc.state = (pc.state | ci.Set) &^ ci.Clear
+ return pc.c.Do(commandName, args...)
+}
+
+func (pc *pooledConnection) DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (reply interface{}, err error) {
+ cwt, ok := pc.c.(ConnWithTimeout)
+ if !ok {
+ return nil, errTimeoutNotSupported
+ }
+ ci := internal.LookupCommandInfo(commandName)
+ pc.state = (pc.state | ci.Set) &^ ci.Clear
+ return cwt.DoWithTimeout(timeout, commandName, args...)
+}
+
+func (pc *pooledConnection) Send(commandName string, args ...interface{}) error {
+ ci := internal.LookupCommandInfo(commandName)
+ pc.state = (pc.state | ci.Set) &^ ci.Clear
+ return pc.c.Send(commandName, args...)
+}
+
+func (pc *pooledConnection) Flush() error {
+ return pc.c.Flush()
+}
+
+func (pc *pooledConnection) Receive() (reply interface{}, err error) {
+ return pc.c.Receive()
+}
+
+func (pc *pooledConnection) ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) {
+ cwt, ok := pc.c.(ConnWithTimeout)
+ if !ok {
+ return nil, errTimeoutNotSupported
+ }
+ return cwt.ReceiveWithTimeout(timeout)
+}
+
+type errorConnection struct{ err error }
+
+func (ec errorConnection) Do(string, ...interface{}) (interface{}, error) { return nil, ec.err }
+func (ec errorConnection) DoWithTimeout(time.Duration, string, ...interface{}) (interface{}, error) {
+ return nil, ec.err
+}
+func (ec errorConnection) Send(string, ...interface{}) error { return ec.err }
+func (ec errorConnection) Err() error { return ec.err }
+func (ec errorConnection) Close() error { return nil }
+func (ec errorConnection) Flush() error { return ec.err }
+func (ec errorConnection) Receive() (interface{}, error) { return nil, ec.err }
+func (ec errorConnection) ReceiveWithTimeout(time.Duration) (interface{}, error) { return nil, ec.err }
+
+type idleList struct {
+ count int
+ front, back *idleConn
+}
+
+type idleConn struct {
+ c Conn
+ t time.Time
+ next, prev *idleConn
+}
+
+func (l *idleList) pushFront(ic *idleConn) {
+ ic.next = l.front
+ ic.prev = nil
+ if l.count == 0 {
+ l.back = ic
+ } else {
+ l.front.prev = ic
+ }
+ l.front = ic
+ l.count++
+ return
+}
+
+func (l *idleList) popFront() {
+ ic := l.front
+ l.count--
+ if l.count == 0 {
+ l.front, l.back = nil, nil
+ } else {
+ ic.next.prev = nil
+ l.front = ic.next
+ }
+ ic.next, ic.prev = nil, nil
+}
+
+func (l *idleList) popBack() {
+ ic := l.back
+ l.count--
+ if l.count == 0 {
+ l.front, l.back = nil, nil
+ } else {
+ ic.prev.next = nil
+ l.back = ic.prev
+ }
+ ic.next, ic.prev = nil, nil
+}
diff --git a/vendor/github.com/garyburd/redigo/redis/pool17.go b/vendor/github.com/garyburd/redigo/redis/pool17.go
new file mode 100644
index 0000000..57a2264
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/pool17.go
@@ -0,0 +1,35 @@
+// Copyright 2018 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// +build go1.7
+
+package redis
+
+import "context"
+
+// GetContext gets a connection using the provided context.
+//
+// The provided Context must be non-nil. If the context expires before the
+// connection is complete, an error is returned. Any expiration on the context
+// will not affect the returned connection.
+//
+// If the function completes without error, then the application must close the
+// returned connection.
+func (p *Pool) GetContext(ctx context.Context) (Conn, error) {
+ c, err := p.get(ctx)
+ if err != nil {
+ return errorConnection{err}, err
+ }
+ return &pooledConnection{p: p, c: c}, nil
+}
diff --git a/vendor/github.com/garyburd/redigo/redis/pubsub.go b/vendor/github.com/garyburd/redigo/redis/pubsub.go
new file mode 100644
index 0000000..f0ac825
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/pubsub.go
@@ -0,0 +1,157 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "errors"
+ "time"
+)
+
+// Subscription represents a subscribe or unsubscribe notification.
+type Subscription struct {
+ // Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe"
+ Kind string
+
+ // The channel that was changed.
+ Channel string
+
+ // The current number of subscriptions for connection.
+ Count int
+}
+
+// Message represents a message notification.
+type Message struct {
+ // The originating channel.
+ Channel string
+
+ // The message data.
+ Data []byte
+}
+
+// PMessage represents a pmessage notification.
+type PMessage struct {
+ // The matched pattern.
+ Pattern string
+
+ // The originating channel.
+ Channel string
+
+ // The message data.
+ Data []byte
+}
+
+// Pong represents a pubsub pong notification.
+type Pong struct {
+ Data string
+}
+
+// PubSubConn wraps a Conn with convenience methods for subscribers.
+type PubSubConn struct {
+ Conn Conn
+}
+
+// Close closes the connection.
+func (c PubSubConn) Close() error {
+ return c.Conn.Close()
+}
+
+// Subscribe subscribes the connection to the specified channels.
+func (c PubSubConn) Subscribe(channel ...interface{}) error {
+ c.Conn.Send("SUBSCRIBE", channel...)
+ return c.Conn.Flush()
+}
+
+// PSubscribe subscribes the connection to the given patterns.
+func (c PubSubConn) PSubscribe(channel ...interface{}) error {
+ c.Conn.Send("PSUBSCRIBE", channel...)
+ return c.Conn.Flush()
+}
+
+// Unsubscribe unsubscribes the connection from the given channels, or from all
+// of them if none is given.
+func (c PubSubConn) Unsubscribe(channel ...interface{}) error {
+ c.Conn.Send("UNSUBSCRIBE", channel...)
+ return c.Conn.Flush()
+}
+
+// PUnsubscribe unsubscribes the connection from the given patterns, or from all
+// of them if none is given.
+func (c PubSubConn) PUnsubscribe(channel ...interface{}) error {
+ c.Conn.Send("PUNSUBSCRIBE", channel...)
+ return c.Conn.Flush()
+}
+
+// Ping sends a PING to the server with the specified data.
+//
+// The connection must be subscribed to at least one channel or pattern when
+// calling this method.
+func (c PubSubConn) Ping(data string) error {
+ c.Conn.Send("PING", data)
+ return c.Conn.Flush()
+}
+
+// Receive returns a pushed message as a Subscription, Message, PMessage, Pong
+// or error. The return value is intended to be used directly in a type switch
+// as illustrated in the PubSubConn example.
+func (c PubSubConn) Receive() interface{} {
+ return c.receiveInternal(c.Conn.Receive())
+}
+
+// ReceiveWithTimeout is like Receive, but it allows the application to
+// override the connection's default timeout.
+func (c PubSubConn) ReceiveWithTimeout(timeout time.Duration) interface{} {
+ return c.receiveInternal(ReceiveWithTimeout(c.Conn, timeout))
+}
+
+func (c PubSubConn) receiveInternal(replyArg interface{}, errArg error) interface{} {
+ reply, err := Values(replyArg, errArg)
+ if err != nil {
+ return err
+ }
+
+ var kind string
+ reply, err = Scan(reply, &kind)
+ if err != nil {
+ return err
+ }
+
+ switch kind {
+ case "message":
+ var m Message
+ if _, err := Scan(reply, &m.Channel, &m.Data); err != nil {
+ return err
+ }
+ return m
+ case "pmessage":
+ var pm PMessage
+ if _, err := Scan(reply, &pm.Pattern, &pm.Channel, &pm.Data); err != nil {
+ return err
+ }
+ return pm
+ case "subscribe", "psubscribe", "unsubscribe", "punsubscribe":
+ s := Subscription{Kind: kind}
+ if _, err := Scan(reply, &s.Channel, &s.Count); err != nil {
+ return err
+ }
+ return s
+ case "pong":
+ var p Pong
+ if _, err := Scan(reply, &p.Data); err != nil {
+ return err
+ }
+ return p
+ }
+ return errors.New("redigo: unknown pubsub notification")
+}
diff --git a/vendor/github.com/garyburd/redigo/redis/redis.go b/vendor/github.com/garyburd/redigo/redis/redis.go
new file mode 100644
index 0000000..141fa4a
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/redis.go
@@ -0,0 +1,117 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "errors"
+ "time"
+)
+
+// Error represents an error returned in a command reply.
+type Error string
+
+func (err Error) Error() string { return string(err) }
+
+// Conn represents a connection to a Redis server.
+type Conn interface {
+ // Close closes the connection.
+ Close() error
+
+ // Err returns a non-nil value when the connection is not usable.
+ Err() error
+
+ // Do sends a command to the server and returns the received reply.
+ Do(commandName string, args ...interface{}) (reply interface{}, err error)
+
+ // Send writes the command to the client's output buffer.
+ Send(commandName string, args ...interface{}) error
+
+ // Flush flushes the output buffer to the Redis server.
+ Flush() error
+
+ // Receive receives a single reply from the Redis server
+ Receive() (reply interface{}, err error)
+}
+
+// Argument is the interface implemented by an object which wants to control how
+// the object is converted to Redis bulk strings.
+type Argument interface {
+ // RedisArg returns a value to be encoded as a bulk string per the
+ // conversions listed in the section 'Executing Commands'.
+ // Implementations should typically return a []byte or string.
+ RedisArg() interface{}
+}
+
+// Scanner is implemented by an object which wants to control how its value is
+// interpreted when read from Redis.
+type Scanner interface {
+ // RedisScan assigns a value from a Redis value. The argument src is one of
+ // the reply types listed in the section `Executing Commands`.
+ //
+ // An error should be returned if the value cannot be stored without
+ // loss of information.
+ RedisScan(src interface{}) error
+}
+
+// ConnWithTimeout is an optional interface that allows the caller to override
+// a connection's default read timeout. This interface is useful for executing
+// the BLPOP, BRPOP, BRPOPLPUSH, XREAD and other commands that block at the
+// server.
+//
+// A connection's default read timeout is set with the DialReadTimeout dial
+// option. Applications should rely on the default timeout for commands that do
+// not block at the server.
+//
+// All of the Conn implementations in this package satisfy the ConnWithTimeout
+// interface.
+//
+// Use the DoWithTimeout and ReceiveWithTimeout helper functions to simplify
+// use of this interface.
+type ConnWithTimeout interface {
+ Conn
+
+ // Do sends a command to the server and returns the received reply.
+ // The timeout overrides the read timeout set when dialing the
+ // connection.
+ DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (reply interface{}, err error)
+
+ // Receive receives a single reply from the Redis server. The timeout
+ // overrides the read timeout set when dialing the connection.
+ ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error)
+}
+
+var errTimeoutNotSupported = errors.New("redis: connection does not support ConnWithTimeout")
+
+// DoWithTimeout executes a Redis command with the specified read timeout. If
+// the connection does not satisfy the ConnWithTimeout interface, then an error
+// is returned.
+func DoWithTimeout(c Conn, timeout time.Duration, cmd string, args ...interface{}) (interface{}, error) {
+ cwt, ok := c.(ConnWithTimeout)
+ if !ok {
+ return nil, errTimeoutNotSupported
+ }
+ return cwt.DoWithTimeout(timeout, cmd, args...)
+}
+
+// ReceiveWithTimeout receives a reply with the specified read timeout. If the
+// connection does not satisfy the ConnWithTimeout interface, then an error is
+// returned.
+func ReceiveWithTimeout(c Conn, timeout time.Duration) (interface{}, error) {
+ cwt, ok := c.(ConnWithTimeout)
+ if !ok {
+ return nil, errTimeoutNotSupported
+ }
+ return cwt.ReceiveWithTimeout(timeout)
+}
diff --git a/vendor/github.com/garyburd/redigo/redis/reply.go b/vendor/github.com/garyburd/redigo/redis/reply.go
new file mode 100644
index 0000000..c2b3b2b
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/reply.go
@@ -0,0 +1,479 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+)
+
+// ErrNil indicates that a reply value is nil.
+var ErrNil = errors.New("redigo: nil returned")
+
+// Int is a helper that converts a command reply to an integer. If err is not
+// equal to nil, then Int returns 0, err. Otherwise, Int converts the
+// reply to an int as follows:
+//
+// Reply type Result
+// integer int(reply), nil
+// bulk string parsed reply, nil
+// nil 0, ErrNil
+// other 0, error
+func Int(reply interface{}, err error) (int, error) {
+ if err != nil {
+ return 0, err
+ }
+ switch reply := reply.(type) {
+ case int64:
+ x := int(reply)
+ if int64(x) != reply {
+ return 0, strconv.ErrRange
+ }
+ return x, nil
+ case []byte:
+ n, err := strconv.ParseInt(string(reply), 10, 0)
+ return int(n), err
+ case nil:
+ return 0, ErrNil
+ case Error:
+ return 0, reply
+ }
+ return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply)
+}
+
+// Int64 is a helper that converts a command reply to 64 bit integer. If err is
+// not equal to nil, then Int64 returns 0, err. Otherwise, Int64 converts the
+// reply to an int64 as follows:
+//
+// Reply type Result
+// integer reply, nil
+// bulk string parsed reply, nil
+// nil 0, ErrNil
+// other 0, error
+func Int64(reply interface{}, err error) (int64, error) {
+ if err != nil {
+ return 0, err
+ }
+ switch reply := reply.(type) {
+ case int64:
+ return reply, nil
+ case []byte:
+ n, err := strconv.ParseInt(string(reply), 10, 64)
+ return n, err
+ case nil:
+ return 0, ErrNil
+ case Error:
+ return 0, reply
+ }
+ return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply)
+}
+
+var errNegativeInt = errors.New("redigo: unexpected value for Uint64")
+
+// Uint64 is a helper that converts a command reply to 64 bit integer. If err is
+// not equal to nil, then Uint64 returns 0, err. Otherwise, Uint64 converts the
+// reply to a uint64 as follows:
+//
+// Reply type Result
+// integer reply, nil
+// bulk string parsed reply, nil
+// nil 0, ErrNil
+// other 0, error
+func Uint64(reply interface{}, err error) (uint64, error) {
+ if err != nil {
+ return 0, err
+ }
+ switch reply := reply.(type) {
+ case int64:
+ if reply < 0 {
+ return 0, errNegativeInt
+ }
+ return uint64(reply), nil
+ case []byte:
+ n, err := strconv.ParseUint(string(reply), 10, 64)
+ return n, err
+ case nil:
+ return 0, ErrNil
+ case Error:
+ return 0, reply
+ }
+ return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply)
+}
+
+// Float64 is a helper that converts a command reply to 64 bit float. If err is
+// not equal to nil, then Float64 returns 0, err. Otherwise, Float64 converts
+// the reply to an int as follows:
+//
+// Reply type Result
+// bulk string parsed reply, nil
+// nil 0, ErrNil
+// other 0, error
+func Float64(reply interface{}, err error) (float64, error) {
+ if err != nil {
+ return 0, err
+ }
+ switch reply := reply.(type) {
+ case []byte:
+ n, err := strconv.ParseFloat(string(reply), 64)
+ return n, err
+ case nil:
+ return 0, ErrNil
+ case Error:
+ return 0, reply
+ }
+ return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply)
+}
+
+// String is a helper that converts a command reply to a string. If err is not
+// equal to nil, then String returns "", err. Otherwise String converts the
+// reply to a string as follows:
+//
+// Reply type Result
+// bulk string string(reply), nil
+// simple string reply, nil
+// nil "", ErrNil
+// other "", error
+func String(reply interface{}, err error) (string, error) {
+ if err != nil {
+ return "", err
+ }
+ switch reply := reply.(type) {
+ case []byte:
+ return string(reply), nil
+ case string:
+ return reply, nil
+ case nil:
+ return "", ErrNil
+ case Error:
+ return "", reply
+ }
+ return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply)
+}
+
+// Bytes is a helper that converts a command reply to a slice of bytes. If err
+// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts
+// the reply to a slice of bytes as follows:
+//
+// Reply type Result
+// bulk string reply, nil
+// simple string []byte(reply), nil
+// nil nil, ErrNil
+// other nil, error
+func Bytes(reply interface{}, err error) ([]byte, error) {
+ if err != nil {
+ return nil, err
+ }
+ switch reply := reply.(type) {
+ case []byte:
+ return reply, nil
+ case string:
+ return []byte(reply), nil
+ case nil:
+ return nil, ErrNil
+ case Error:
+ return nil, reply
+ }
+ return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply)
+}
+
+// Bool is a helper that converts a command reply to a boolean. If err is not
+// equal to nil, then Bool returns false, err. Otherwise Bool converts the
+// reply to boolean as follows:
+//
+// Reply type Result
+// integer value != 0, nil
+// bulk string strconv.ParseBool(reply)
+// nil false, ErrNil
+// other false, error
+func Bool(reply interface{}, err error) (bool, error) {
+ if err != nil {
+ return false, err
+ }
+ switch reply := reply.(type) {
+ case int64:
+ return reply != 0, nil
+ case []byte:
+ return strconv.ParseBool(string(reply))
+ case nil:
+ return false, ErrNil
+ case Error:
+ return false, reply
+ }
+ return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply)
+}
+
+// MultiBulk is a helper that converts an array command reply to a []interface{}.
+//
+// Deprecated: Use Values instead.
+func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) }
+
+// Values is a helper that converts an array command reply to a []interface{}.
+// If err is not equal to nil, then Values returns nil, err. Otherwise, Values
+// converts the reply as follows:
+//
+// Reply type Result
+// array reply, nil
+// nil nil, ErrNil
+// other nil, error
+func Values(reply interface{}, err error) ([]interface{}, error) {
+ if err != nil {
+ return nil, err
+ }
+ switch reply := reply.(type) {
+ case []interface{}:
+ return reply, nil
+ case nil:
+ return nil, ErrNil
+ case Error:
+ return nil, reply
+ }
+ return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply)
+}
+
+func sliceHelper(reply interface{}, err error, name string, makeSlice func(int), assign func(int, interface{}) error) error {
+ if err != nil {
+ return err
+ }
+ switch reply := reply.(type) {
+ case []interface{}:
+ makeSlice(len(reply))
+ for i := range reply {
+ if reply[i] == nil {
+ continue
+ }
+ if err := assign(i, reply[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+ case nil:
+ return ErrNil
+ case Error:
+ return reply
+ }
+ return fmt.Errorf("redigo: unexpected type for %s, got type %T", name, reply)
+}
+
+// Float64s is a helper that converts an array command reply to a []float64. If
+// err is not equal to nil, then Float64s returns nil, err. Nil array items are
+// converted to 0 in the output slice. Float64s returns an error if an array
+// item is not a bulk string or nil.
+func Float64s(reply interface{}, err error) ([]float64, error) {
+ var result []float64
+ err = sliceHelper(reply, err, "Float64s", func(n int) { result = make([]float64, n) }, func(i int, v interface{}) error {
+ p, ok := v.([]byte)
+ if !ok {
+ return fmt.Errorf("redigo: unexpected element type for Floats64, got type %T", v)
+ }
+ f, err := strconv.ParseFloat(string(p), 64)
+ result[i] = f
+ return err
+ })
+ return result, err
+}
+
+// Strings is a helper that converts an array command reply to a []string. If
+// err is not equal to nil, then Strings returns nil, err. Nil array items are
+// converted to "" in the output slice. Strings returns an error if an array
+// item is not a bulk string or nil.
+func Strings(reply interface{}, err error) ([]string, error) {
+ var result []string
+ err = sliceHelper(reply, err, "Strings", func(n int) { result = make([]string, n) }, func(i int, v interface{}) error {
+ switch v := v.(type) {
+ case string:
+ result[i] = v
+ return nil
+ case []byte:
+ result[i] = string(v)
+ return nil
+ default:
+ return fmt.Errorf("redigo: unexpected element type for Strings, got type %T", v)
+ }
+ })
+ return result, err
+}
+
+// ByteSlices is a helper that converts an array command reply to a [][]byte.
+// If err is not equal to nil, then ByteSlices returns nil, err. Nil array
+// items stay nil. ByteSlices returns an error if an array item is not a
+// bulk string or nil.
+func ByteSlices(reply interface{}, err error) ([][]byte, error) {
+ var result [][]byte
+ err = sliceHelper(reply, err, "ByteSlices", func(n int) { result = make([][]byte, n) }, func(i int, v interface{}) error {
+ p, ok := v.([]byte)
+ if !ok {
+ return fmt.Errorf("redigo: unexpected element type for ByteSlices, got type %T", v)
+ }
+ result[i] = p
+ return nil
+ })
+ return result, err
+}
+
+// Int64s is a helper that converts an array command reply to a []int64.
+// If err is not equal to nil, then Int64s returns nil, err. Nil array
+// items stay nil. Int64s returns an error if an array item is not a
+// bulk string or nil.
+func Int64s(reply interface{}, err error) ([]int64, error) {
+ var result []int64
+ err = sliceHelper(reply, err, "Int64s", func(n int) { result = make([]int64, n) }, func(i int, v interface{}) error {
+ switch v := v.(type) {
+ case int64:
+ result[i] = v
+ return nil
+ case []byte:
+ n, err := strconv.ParseInt(string(v), 10, 64)
+ result[i] = n
+ return err
+ default:
+ return fmt.Errorf("redigo: unexpected element type for Int64s, got type %T", v)
+ }
+ })
+ return result, err
+}
+
+// Ints is a helper that converts an array command reply to a []int.
+// If err is not equal to nil, then Ints returns nil, err. Nil array
+// items stay nil. Ints returns an error if an array item is not a
+// bulk string or nil.
+func Ints(reply interface{}, err error) ([]int, error) {
+ var result []int
+ err = sliceHelper(reply, err, "Ints", func(n int) { result = make([]int, n) }, func(i int, v interface{}) error {
+ switch v := v.(type) {
+ case int64:
+ n := int(v)
+ if int64(n) != v {
+ return strconv.ErrRange
+ }
+ result[i] = n
+ return nil
+ case []byte:
+ n, err := strconv.Atoi(string(v))
+ result[i] = n
+ return err
+ default:
+ return fmt.Errorf("redigo: unexpected element type for Ints, got type %T", v)
+ }
+ })
+ return result, err
+}
+
+// StringMap is a helper that converts an array of strings (alternating key, value)
+// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format.
+// Requires an even number of values in result.
+func StringMap(result interface{}, err error) (map[string]string, error) {
+ values, err := Values(result, err)
+ if err != nil {
+ return nil, err
+ }
+ if len(values)%2 != 0 {
+ return nil, errors.New("redigo: StringMap expects even number of values result")
+ }
+ m := make(map[string]string, len(values)/2)
+ for i := 0; i < len(values); i += 2 {
+ key, okKey := values[i].([]byte)
+ value, okValue := values[i+1].([]byte)
+ if !okKey || !okValue {
+ return nil, errors.New("redigo: StringMap key not a bulk string value")
+ }
+ m[string(key)] = string(value)
+ }
+ return m, nil
+}
+
+// IntMap is a helper that converts an array of strings (alternating key, value)
+// into a map[string]int. The HGETALL commands return replies in this format.
+// Requires an even number of values in result.
+func IntMap(result interface{}, err error) (map[string]int, error) {
+ values, err := Values(result, err)
+ if err != nil {
+ return nil, err
+ }
+ if len(values)%2 != 0 {
+ return nil, errors.New("redigo: IntMap expects even number of values result")
+ }
+ m := make(map[string]int, len(values)/2)
+ for i := 0; i < len(values); i += 2 {
+ key, ok := values[i].([]byte)
+ if !ok {
+ return nil, errors.New("redigo: IntMap key not a bulk string value")
+ }
+ value, err := Int(values[i+1], nil)
+ if err != nil {
+ return nil, err
+ }
+ m[string(key)] = value
+ }
+ return m, nil
+}
+
+// Int64Map is a helper that converts an array of strings (alternating key, value)
+// into a map[string]int64. The HGETALL commands return replies in this format.
+// Requires an even number of values in result.
+func Int64Map(result interface{}, err error) (map[string]int64, error) {
+ values, err := Values(result, err)
+ if err != nil {
+ return nil, err
+ }
+ if len(values)%2 != 0 {
+ return nil, errors.New("redigo: Int64Map expects even number of values result")
+ }
+ m := make(map[string]int64, len(values)/2)
+ for i := 0; i < len(values); i += 2 {
+ key, ok := values[i].([]byte)
+ if !ok {
+ return nil, errors.New("redigo: Int64Map key not a bulk string value")
+ }
+ value, err := Int64(values[i+1], nil)
+ if err != nil {
+ return nil, err
+ }
+ m[string(key)] = value
+ }
+ return m, nil
+}
+
+// Positions is a helper that converts an array of positions (lat, long)
+// into a [][2]float64. The GEOPOS command returns replies in this format.
+func Positions(result interface{}, err error) ([]*[2]float64, error) {
+ values, err := Values(result, err)
+ if err != nil {
+ return nil, err
+ }
+ positions := make([]*[2]float64, len(values))
+ for i := range values {
+ if values[i] == nil {
+ continue
+ }
+ p, ok := values[i].([]interface{})
+ if !ok {
+ return nil, fmt.Errorf("redigo: unexpected element type for interface slice, got type %T", values[i])
+ }
+ if len(p) != 2 {
+ return nil, fmt.Errorf("redigo: unexpected number of values for a member position, got %d", len(p))
+ }
+ lat, err := Float64(p[0], nil)
+ if err != nil {
+ return nil, err
+ }
+ long, err := Float64(p[1], nil)
+ if err != nil {
+ return nil, err
+ }
+ positions[i] = &[2]float64{lat, long}
+ }
+ return positions, nil
+}
diff --git a/vendor/github.com/garyburd/redigo/redis/scan.go b/vendor/github.com/garyburd/redigo/redis/scan.go
new file mode 100644
index 0000000..ef9551b
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/scan.go
@@ -0,0 +1,585 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// ensureLen sets the slice value d to length n, reusing the existing backing
+// array when its capacity suffices and allocating a fresh slice otherwise.
+func ensureLen(d reflect.Value, n int) {
+	if n > d.Cap() {
+		d.Set(reflect.MakeSlice(d.Type(), n, n))
+	} else {
+		d.SetLen(n)
+	}
+}
+
+// cannotConvert builds a descriptive conversion error, naming the Redis reply
+// type of source s and the Go destination type of d.
+func cannotConvert(d reflect.Value, s interface{}) error {
+	var sname string
+	switch s.(type) {
+	case string:
+		sname = "Redis simple string"
+	case Error:
+		sname = "Redis error"
+	case int64:
+		sname = "Redis integer"
+	case []byte:
+		sname = "Redis bulk string"
+	case []interface{}:
+		sname = "Redis array"
+	default:
+		sname = reflect.TypeOf(s).String()
+	}
+	return fmt.Errorf("cannot convert from %s to %s", sname, d.Type())
+}
+
+// convertAssignBulkString parses the bulk string s into the destination value
+// d using strconv, dispatching on d's kind. []byte destinations must have a
+// byte element type. Returns an error for unsupported kinds or parse failures.
+func convertAssignBulkString(d reflect.Value, s []byte) (err error) {
+	switch d.Type().Kind() {
+	case reflect.Float32, reflect.Float64:
+		var x float64
+		x, err = strconv.ParseFloat(string(s), d.Type().Bits())
+		// On parse failure x is zero; the Set still runs but err reports it.
+		d.SetFloat(x)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		var x int64
+		x, err = strconv.ParseInt(string(s), 10, d.Type().Bits())
+		d.SetInt(x)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		var x uint64
+		x, err = strconv.ParseUint(string(s), 10, d.Type().Bits())
+		d.SetUint(x)
+	case reflect.Bool:
+		var x bool
+		x, err = strconv.ParseBool(string(s))
+		d.SetBool(x)
+	case reflect.String:
+		d.SetString(string(s))
+	case reflect.Slice:
+		if d.Type().Elem().Kind() != reflect.Uint8 {
+			err = cannotConvert(d, s)
+		} else {
+			d.SetBytes(s)
+		}
+	default:
+		err = cannotConvert(d, s)
+	}
+	return
+}
+
+// convertAssignInt stores the Redis integer s into the destination value d.
+// Overflow is detected by reading the value back and comparing: if the
+// destination's narrower width truncated it, the destination is zeroed and
+// strconv.ErrRange is returned.
+func convertAssignInt(d reflect.Value, s int64) (err error) {
+	switch d.Type().Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		d.SetInt(s)
+		if d.Int() != s {
+			err = strconv.ErrRange
+			d.SetInt(0)
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		// Negative values cannot be represented in an unsigned destination.
+		if s < 0 {
+			err = strconv.ErrRange
+		} else {
+			x := uint64(s)
+			d.SetUint(x)
+			if d.Uint() != x {
+				err = strconv.ErrRange
+				d.SetUint(0)
+			}
+		}
+	case reflect.Bool:
+		d.SetBool(s != 0)
+	default:
+		err = cannotConvert(d, s)
+	}
+	return
+}
+
+// convertAssignValue assigns reply value s to the reflected destination d.
+// If d (or its address) implements Scanner, RedisScan takes precedence over
+// the built-in conversions; a nil pointer destination is allocated first.
+func convertAssignValue(d reflect.Value, s interface{}) (err error) {
+	if d.Kind() != reflect.Ptr {
+		// Non-pointer destination: check whether *T implements Scanner.
+		if d.CanAddr() {
+			d2 := d.Addr()
+			if d2.CanInterface() {
+				if scanner, ok := d2.Interface().(Scanner); ok {
+					return scanner.RedisScan(s)
+				}
+			}
+		}
+	} else if d.CanInterface() {
+		// Already a reflect.Ptr
+		if d.IsNil() {
+			d.Set(reflect.New(d.Type().Elem()))
+		}
+		if scanner, ok := d.Interface().(Scanner); ok {
+			return scanner.RedisScan(s)
+		}
+	}
+
+	switch s := s.(type) {
+	case []byte:
+		err = convertAssignBulkString(d, s)
+	case int64:
+		err = convertAssignInt(d, s)
+	default:
+		err = cannotConvert(d, s)
+	}
+	return err
+}
+
+// convertAssignArray copies the Redis array s element-by-element into the
+// slice value d, resizing d to len(s) first. Stops at the first element that
+// fails to convert.
+func convertAssignArray(d reflect.Value, s []interface{}) error {
+	if d.Type().Kind() != reflect.Slice {
+		return cannotConvert(d, s)
+	}
+	ensureLen(d, len(s))
+	for i := 0; i < len(s); i++ {
+		if err := convertAssignValue(d.Index(i), s[i]); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// convertAssign copies the reply value s into the destination d. A Scanner
+// destination takes precedence. The most common destination types are handled
+// with fast type switches; anything else falls back to the reflection-based
+// helpers. A nil source is ignored (dest unmodified); a nil dest skips the
+// value.
+func convertAssign(d interface{}, s interface{}) (err error) {
+	if scanner, ok := d.(Scanner); ok {
+		return scanner.RedisScan(s)
+	}
+
+	// Handle the most common destination types using type switches and
+	// fall back to reflection for all other types.
+	switch s := s.(type) {
+	case nil:
+		// ignore
+	case []byte:
+		switch d := d.(type) {
+		case *string:
+			*d = string(s)
+		case *int:
+			*d, err = strconv.Atoi(string(s))
+		case *bool:
+			*d, err = strconv.ParseBool(string(s))
+		case *[]byte:
+			*d = s
+		case *interface{}:
+			*d = s
+		case nil:
+			// skip value
+		default:
+			// Reflection fallback: destination must be a pointer so we can
+			// assign through it.
+			if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+				err = cannotConvert(d, s)
+			} else {
+				err = convertAssignBulkString(d.Elem(), s)
+			}
+		}
+	case int64:
+		switch d := d.(type) {
+		case *int:
+			// Detect truncation on platforms where int is narrower than 64 bits.
+			x := int(s)
+			if int64(x) != s {
+				err = strconv.ErrRange
+				x = 0
+			}
+			*d = x
+		case *bool:
+			*d = s != 0
+		case *interface{}:
+			*d = s
+		case nil:
+			// skip value
+		default:
+			if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+				err = cannotConvert(d, s)
+			} else {
+				err = convertAssignInt(d.Elem(), s)
+			}
+		}
+	case string:
+		switch d := d.(type) {
+		case *string:
+			*d = s
+		case *interface{}:
+			*d = s
+		case nil:
+			// skip value
+		default:
+			err = cannotConvert(reflect.ValueOf(d), s)
+		}
+	case []interface{}:
+		switch d := d.(type) {
+		case *[]interface{}:
+			*d = s
+		case *interface{}:
+			*d = s
+		case nil:
+			// skip value
+		default:
+			if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+				err = cannotConvert(d, s)
+			} else {
+				err = convertAssignArray(d.Elem(), s)
+			}
+		}
+	case Error:
+		// A Redis error reply is surfaced directly as the assignment error.
+		err = s
+	default:
+		err = cannotConvert(reflect.ValueOf(d), s)
+	}
+	return
+}
+
+// Scan copies from src to the values pointed at by dest.
+//
+// Scan uses RedisScan if available otherwise:
+//
+// The values pointed at by dest must be an integer, float, boolean, string,
+// []byte, interface{} or slices of these types. Scan uses the standard strconv
+// package to convert bulk strings to numeric and boolean types.
+//
+// If a dest value is nil, then the corresponding src value is skipped.
+//
+// If a src element is nil, then the corresponding dest value is not modified.
+//
+// To enable easy use of Scan in a loop, Scan returns the slice of src
+// following the copied values.
+func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) {
+	if len(src) < len(dest) {
+		return nil, errors.New("redigo.Scan: array short")
+	}
+	var err error
+	for i, d := range dest {
+		// Stop at the first failed assignment, annotating which dest failed.
+		err = convertAssign(d, src[i])
+		if err != nil {
+			err = fmt.Errorf("redigo.Scan: cannot assign to dest %d: %v", i, err)
+			break
+		}
+	}
+	return src[len(dest):], err
+}
+
+// fieldSpec describes one scannable struct field: the Redis field name (from
+// the 'redis' tag or the Go field name), the reflect field index path, and
+// whether the field is skipped when empty (omitempty tag option).
+type fieldSpec struct {
+	name      string
+	index     []int
+	omitEmpty bool
+}
+
+// structSpec is the compiled scanning metadata for a struct type: a map for
+// name lookup and a slice preserving field declaration order.
+type structSpec struct {
+	m map[string]*fieldSpec
+	l []*fieldSpec
+}
+
+// fieldSpec looks up the spec for a field by its (bulk string) name, or nil.
+func (ss *structSpec) fieldSpec(name []byte) *fieldSpec {
+	return ss.m[string(name)]
+}
+
+// compileStructSpec walks struct type t (recursing into embedded structs) and
+// records a fieldSpec for every exported, non-ignored field in ss. The depth
+// map tracks the shallowest embedding depth at which each field name was
+// seen, implementing Go's field promotion rules: a shallower field shadows a
+// deeper one, and two fields at the same depth are ambiguous and removed.
+// index is the reflect index path to the struct currently being walked.
+func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) {
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		switch {
+		case f.PkgPath != "" && !f.Anonymous:
+			// Ignore unexported fields.
+		case f.Anonymous:
+			// TODO: Handle pointers. Requires change to decoder and
+			// protection against infinite recursion.
+			if f.Type.Kind() == reflect.Struct {
+				compileStructSpec(f.Type, depth, append(index, i), ss)
+			}
+		default:
+			fs := &fieldSpec{name: f.Name}
+			tag := f.Tag.Get("redis")
+			p := strings.Split(tag, ",")
+			if len(p) > 0 {
+				// `redis:"-"` excludes the field entirely.
+				if p[0] == "-" {
+					continue
+				}
+				if len(p[0]) > 0 {
+					fs.name = p[0]
+				}
+				for _, s := range p[1:] {
+					switch s {
+					case "omitempty":
+						fs.omitEmpty = true
+					default:
+						panic(fmt.Errorf("redigo: unknown field tag %s for type %s", s, t.Name()))
+					}
+				}
+			}
+			d, found := depth[fs.name]
+			if !found {
+				// Sentinel: deeper than any real embedding depth.
+				d = 1 << 30
+			}
+			switch {
+			case len(index) == d:
+				// At same depth, remove from result.
+				delete(ss.m, fs.name)
+				j := 0
+				for i := 0; i < len(ss.l); i++ {
+					if fs.name != ss.l[i].name {
+						ss.l[j] = ss.l[i]
+						j += 1
+					}
+				}
+				ss.l = ss.l[:j]
+			case len(index) < d:
+				// Shallower (or first) occurrence wins; record its index path.
+				fs.index = make([]int, len(index)+1)
+				copy(fs.index, index)
+				fs.index[len(index)] = i
+				depth[fs.name] = len(index)
+				ss.m[fs.name] = fs
+				ss.l = append(ss.l, fs)
+			}
+		}
+	}
+}
+
+var (
+	// structSpecMutex guards structSpecCache.
+	structSpecMutex  sync.RWMutex
+	structSpecCache  = make(map[reflect.Type]*structSpec)
+	defaultFieldSpec = &fieldSpec{}
+)
+
+// structSpecForType returns the (lazily compiled, cached) scanning metadata
+// for struct type t. Uses double-checked locking: a fast read-locked lookup,
+// then a write-locked re-check before compiling and storing the spec.
+func structSpecForType(t reflect.Type) *structSpec {
+
+	structSpecMutex.RLock()
+	ss, found := structSpecCache[t]
+	structSpecMutex.RUnlock()
+	if found {
+		return ss
+	}
+
+	structSpecMutex.Lock()
+	defer structSpecMutex.Unlock()
+	// Another goroutine may have compiled the spec while we waited.
+	ss, found = structSpecCache[t]
+	if found {
+		return ss
+	}
+
+	ss = &structSpec{m: make(map[string]*fieldSpec)}
+	compileStructSpec(t, make(map[string]int), nil, ss)
+	structSpecCache[t] = ss
+	return ss
+}
+
+var errScanStructValue = errors.New("redigo.ScanStruct: value must be non-nil pointer to a struct")
+
+// ScanStruct scans alternating names and values from src to a struct. The
+// HGETALL and CONFIG GET commands return replies in this format.
+//
+// ScanStruct uses exported field names to match values in the response. Use
+// 'redis' field tag to override the name:
+//
+//      Field int `redis:"myName"`
+//
+// Fields with the tag redis:"-" are ignored.
+//
+// Each field uses RedisScan if available otherwise:
+// Integer, float, boolean, string and []byte fields are supported. Scan uses the
+// standard strconv package to convert bulk string values to numeric and
+// boolean types.
+//
+// If a src element is nil, then the corresponding field is not modified.
+func ScanStruct(src []interface{}, dest interface{}) error {
+	d := reflect.ValueOf(dest)
+	if d.Kind() != reflect.Ptr || d.IsNil() {
+		return errScanStructValue
+	}
+	d = d.Elem()
+	if d.Kind() != reflect.Struct {
+		return errScanStructValue
+	}
+	ss := structSpecForType(d.Type())
+
+	if len(src)%2 != 0 {
+		return errors.New("redigo.ScanStruct: number of values not a multiple of 2")
+	}
+
+	for i := 0; i < len(src); i += 2 {
+		// src alternates name, value; a nil value leaves the field untouched.
+		s := src[i+1]
+		if s == nil {
+			continue
+		}
+		name, ok := src[i].([]byte)
+		if !ok {
+			return fmt.Errorf("redigo.ScanStruct: key %d not a bulk string value", i)
+		}
+		fs := ss.fieldSpec(name)
+		if fs == nil {
+			// Names with no matching struct field are silently ignored.
+			continue
+		}
+		if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil {
+			return fmt.Errorf("redigo.ScanStruct: cannot assign field %s: %v", fs.name, err)
+		}
+	}
+	return nil
+}
+
+var (
+	// Note: ScanSlice validates a pointer to a slice; the previous message
+	// said "struct", copy-pasted from errScanStructValue.
+	errScanSliceValue = errors.New("redigo.ScanSlice: dest must be non-nil pointer to a slice")
+)
+
+// ScanSlice scans src to the slice pointed to by dest. The elements the dest
+// slice must be integer, float, boolean, string, struct or pointer to struct
+// values.
+//
+// Struct fields must be integer, float, boolean or string values. All struct
+// fields are used unless a subset is specified using fieldNames.
+func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error {
+	d := reflect.ValueOf(dest)
+	if d.Kind() != reflect.Ptr || d.IsNil() {
+		return errScanSliceValue
+	}
+	d = d.Elem()
+	if d.Kind() != reflect.Slice {
+		return errScanSliceValue
+	}
+
+	// Unwrap *Struct element types so values can be scanned into the pointee.
+	isPtr := false
+	t := d.Type().Elem()
+	if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
+		isPtr = true
+		t = t.Elem()
+	}
+
+	if t.Kind() != reflect.Struct {
+		// Scalar elements: one src value per dest element.
+		ensureLen(d, len(src))
+		for i, s := range src {
+			if s == nil {
+				continue
+			}
+			if err := convertAssignValue(d.Index(i), s); err != nil {
+				return fmt.Errorf("redigo.ScanSlice: cannot assign element %d: %v", i, err)
+			}
+		}
+		return nil
+	}
+
+	// Struct elements: src is consumed in groups of len(fss) values, one
+	// group per struct, in field order.
+	ss := structSpecForType(t)
+	fss := ss.l
+	if len(fieldNames) > 0 {
+		fss = make([]*fieldSpec, len(fieldNames))
+		for i, name := range fieldNames {
+			fss[i] = ss.m[name]
+			if fss[i] == nil {
+				return fmt.Errorf("redigo.ScanSlice: ScanSlice bad field name %s", name)
+			}
+		}
+	}
+
+	if len(fss) == 0 {
+		return errors.New("redigo.ScanSlice: no struct fields")
+	}
+
+	n := len(src) / len(fss)
+	if n*len(fss) != len(src) {
+		return errors.New("redigo.ScanSlice: length not a multiple of struct field count")
+	}
+
+	ensureLen(d, n)
+	for i := 0; i < n; i++ {
+		d := d.Index(i)
+		if isPtr {
+			if d.IsNil() {
+				d.Set(reflect.New(t))
+			}
+			d = d.Elem()
+		}
+		for j, fs := range fss {
+			s := src[i*len(fss)+j]
+			if s == nil {
+				// nil value: leave the field at its current value.
+				continue
+			}
+			if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil {
+				return fmt.Errorf("redigo.ScanSlice: cannot assign element %d to field %s: %v", i*len(fss)+j, fs.name, err)
+			}
+		}
+	}
+	return nil
+}
+
+// Args is a helper for constructing command arguments from structured values.
+type Args []interface{}
+
+// Add returns the result of appending value to args.
+func (args Args) Add(value ...interface{}) Args {
+	return append(args, value...)
+}
+
+// AddFlat returns the result of appending the flattened value of v to args.
+//
+// Maps are flattened by appending the alternating keys and map values to args.
+//
+// Slices are flattened by appending the slice elements to args.
+//
+// Structs are flattened by appending the alternating names and values of
+// exported fields to args. If v is a nil struct pointer, then nothing is
+// appended. The 'redis' field tag overrides struct field names. See ScanStruct
+// for more information on the use of the 'redis' field tag.
+//
+// Other types are appended to args as is.
+func (args Args) AddFlat(v interface{}) Args {
+	rv := reflect.ValueOf(v)
+	switch rv.Kind() {
+	case reflect.Struct:
+		args = flattenStruct(args, rv)
+	case reflect.Slice:
+		for i := 0; i < rv.Len(); i++ {
+			args = append(args, rv.Index(i).Interface())
+		}
+	case reflect.Map:
+		// Map iteration order is unspecified; key/value pairs are appended
+		// in whatever order MapKeys yields.
+		for _, k := range rv.MapKeys() {
+			args = append(args, k.Interface(), rv.MapIndex(k).Interface())
+		}
+	case reflect.Ptr:
+		if rv.Type().Elem().Kind() == reflect.Struct {
+			// nil struct pointers contribute nothing.
+			if !rv.IsNil() {
+				args = flattenStruct(args, rv.Elem())
+			}
+		} else {
+			args = append(args, v)
+		}
+	default:
+		args = append(args, v)
+	}
+	return args
+}
+
+// flattenStruct appends alternating field names and values of struct value v
+// to args, in declaration order, honoring the 'redis' tag name and skipping
+// omitempty fields whose value is the zero/empty value for their kind.
+func flattenStruct(args Args, v reflect.Value) Args {
+	ss := structSpecForType(v.Type())
+	for _, fs := range ss.l {
+		fv := v.FieldByIndex(fs.index)
+		if fs.omitEmpty {
+			var empty = false
+			switch fv.Kind() {
+			case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+				empty = fv.Len() == 0
+			case reflect.Bool:
+				empty = !fv.Bool()
+			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+				empty = fv.Int() == 0
+			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+				empty = fv.Uint() == 0
+			case reflect.Float32, reflect.Float64:
+				empty = fv.Float() == 0
+			case reflect.Interface, reflect.Ptr:
+				empty = fv.IsNil()
+			}
+			if empty {
+				continue
+			}
+		}
+		args = append(args, fs.name, fv.Interface())
+	}
+	return args
+}
diff --git a/vendor/github.com/garyburd/redigo/redis/script.go b/vendor/github.com/garyburd/redigo/redis/script.go
new file mode 100644
index 0000000..0ef1c82
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/script.go
@@ -0,0 +1,91 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "crypto/sha1"
+ "encoding/hex"
+ "io"
+ "strings"
+)
+
+// Script encapsulates the source, hash and key count for a Lua script. See
+// http://redis.io/commands/eval for information on scripts in Redis.
+type Script struct {
+	keyCount int    // number of KEYS arguments; < 0 means caller-supplied
+	src      string // Lua source, sent with EVAL and SCRIPT LOAD
+	hash     string // hex SHA1 of src, sent with EVALSHA
+}
+
+// NewScript returns a new script object. If keyCount is greater than or equal
+// to zero, then the count is automatically inserted in the EVAL command
+// argument list. If keyCount is less than zero, then the application supplies
+// the count as the first value in the keysAndArgs argument to the Do, Send and
+// SendHash methods.
+func NewScript(keyCount int, src string) *Script {
+	// Precompute the SHA1 digest so EVALSHA can be attempted without a
+	// round trip to load the script.
+	h := sha1.New()
+	io.WriteString(h, src)
+	return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))}
+}
+
+// args builds the argument list for EVAL/EVALSHA: spec (the script source or
+// its hash), then — when s.keyCount is non-negative — the key count, then the
+// caller's keys and arguments. With a negative keyCount the caller supplies
+// the count as the first element of keysAndArgs.
+func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} {
+	var args []interface{}
+	if s.keyCount < 0 {
+		args = make([]interface{}, 1+len(keysAndArgs))
+		args[0] = spec
+		copy(args[1:], keysAndArgs)
+	} else {
+		args = make([]interface{}, 2+len(keysAndArgs))
+		args[0] = spec
+		args[1] = s.keyCount
+		copy(args[2:], keysAndArgs)
+	}
+	return args
+}
+
+// Hash returns the script hash.
+func (s *Script) Hash() string {
+	return s.hash
+}
+
+// Do evaluates the script. Under the covers, Do optimistically evaluates the
+// script using the EVALSHA command. If the command fails because the script is
+// not loaded, then Do evaluates the script using the EVAL command (thus
+// causing the script to load).
+func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) {
+	v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...)
+	// A NOSCRIPT error means the server does not have the script cached;
+	// fall back to EVAL, which also loads it for future EVALSHA calls.
+	if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") {
+		v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...)
+	}
+	return v, err
+}
+
+// SendHash evaluates the script without waiting for the reply. The script is
+// evaluated with the EVALSHA command. The application must ensure that the
+// script is loaded by a previous call to Send, Do or Load methods.
+func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error {
+	return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...)
+}
+
+// Send evaluates the script without waiting for the reply.
+func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error {
+	return c.Send("EVAL", s.args(s.src, keysAndArgs)...)
+}
+
+// Load loads the script without evaluating it.
+func (s *Script) Load(c Conn) error {
+	_, err := c.Do("SCRIPT", "LOAD", s.src)
+	return err
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/.gitignore b/vendor/github.com/go-sql-driver/mysql/.gitignore
new file mode 100644
index 0000000..2de28da
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/.gitignore
@@ -0,0 +1,9 @@
+.DS_Store
+.DS_Store?
+._*
+.Spotlight-V100
+.Trashes
+Icon?
+ehthumbs.db
+Thumbs.db
+.idea
diff --git a/vendor/github.com/go-sql-driver/mysql/.travis.yml b/vendor/github.com/go-sql-driver/mysql/.travis.yml
new file mode 100644
index 0000000..cc1268c
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/.travis.yml
@@ -0,0 +1,107 @@
+sudo: false
+language: go
+go:
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
+ - 1.10.x
+ - master
+
+before_install:
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+
+before_script:
+ - echo -e "[server]\ninnodb_log_file_size=256MB\ninnodb_buffer_pool_size=512MB\nmax_allowed_packet=16MB" | sudo tee -a /etc/mysql/my.cnf
+ - sudo service mysql restart
+ - .travis/wait_mysql.sh
+ - mysql -e 'create database gotest;'
+
+matrix:
+ include:
+ - env: DB=MYSQL8
+ sudo: required
+ dist: trusty
+ go: 1.10.x
+ services:
+ - docker
+ before_install:
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+ - docker pull mysql:8.0
+ - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
+ mysql:8.0 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
+ - cp .travis/docker.cnf ~/.my.cnf
+ - .travis/wait_mysql.sh
+ before_script:
+ - export MYSQL_TEST_USER=gotest
+ - export MYSQL_TEST_PASS=secret
+ - export MYSQL_TEST_ADDR=127.0.0.1:3307
+ - export MYSQL_TEST_CONCURRENT=1
+
+ - env: DB=MYSQL57
+ sudo: required
+ dist: trusty
+ go: 1.10.x
+ services:
+ - docker
+ before_install:
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+ - docker pull mysql:5.7
+ - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
+ mysql:5.7 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
+ - cp .travis/docker.cnf ~/.my.cnf
+ - .travis/wait_mysql.sh
+ before_script:
+ - export MYSQL_TEST_USER=gotest
+ - export MYSQL_TEST_PASS=secret
+ - export MYSQL_TEST_ADDR=127.0.0.1:3307
+ - export MYSQL_TEST_CONCURRENT=1
+
+ - env: DB=MARIA55
+ sudo: required
+ dist: trusty
+ go: 1.10.x
+ services:
+ - docker
+ before_install:
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+ - docker pull mariadb:5.5
+ - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
+ mariadb:5.5 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
+ - cp .travis/docker.cnf ~/.my.cnf
+ - .travis/wait_mysql.sh
+ before_script:
+ - export MYSQL_TEST_USER=gotest
+ - export MYSQL_TEST_PASS=secret
+ - export MYSQL_TEST_ADDR=127.0.0.1:3307
+ - export MYSQL_TEST_CONCURRENT=1
+
+ - env: DB=MARIA10_1
+ sudo: required
+ dist: trusty
+ go: 1.10.x
+ services:
+ - docker
+ before_install:
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+ - docker pull mariadb:10.1
+ - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
+ mariadb:10.1 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
+ - cp .travis/docker.cnf ~/.my.cnf
+ - .travis/wait_mysql.sh
+ before_script:
+ - export MYSQL_TEST_USER=gotest
+ - export MYSQL_TEST_PASS=secret
+ - export MYSQL_TEST_ADDR=127.0.0.1:3307
+ - export MYSQL_TEST_CONCURRENT=1
+
+script:
+ - go test -v -covermode=count -coverprofile=coverage.out
+ - go vet ./...
+ - .travis/gofmt.sh
+after_script:
+ - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci
diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS
new file mode 100644
index 0000000..73ff68f
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS
@@ -0,0 +1,89 @@
+# This is the official list of Go-MySQL-Driver authors for copyright purposes.
+
+# If you are submitting a patch, please add your name or the name of the
+# organization which holds the copyright to this list in alphabetical order.
+
+# Names should be added to this file as
+# Name
+# The email address is not required for organizations.
+# Please keep the list sorted.
+
+
+# Individual Persons
+
+Aaron Hopkins
+Achille Roussel
+Alexey Palazhchenko
+Andrew Reid
+Arne Hormann
+Asta Xie
+Bulat Gaifullin
+Carlos Nieto
+Chris Moos
+Craig Wilson
+Daniel Montoya
+Daniel Nichter
+Daniël van Eeden
+Dave Protasowski
+DisposaBoy
+Egor Smolyakov
+Evan Shaw
+Frederick Mayle
+Gustavo Kristic
+Hajime Nakagami
+Hanno Braun
+Henri Yandell
+Hirotaka Yamamoto
+ICHINOSE Shogo
+INADA Naoki
+Jacek Szwec
+James Harr
+Jeff Hodges
+Jeffrey Charles
+Jian Zhen
+Joshua Prunier
+Julien Lefevre
+Julien Schmidt
+Justin Li
+Justin Nuß
+Kamil Dziedzic
+Kevin Malachowski
+Kieron Woodhouse
+Lennart Rudolph
+Leonardo YongUk Kim
+Linh Tran Tuan
+Lion Yang
+Luca Looz
+Lucas Liu
+Luke Scott
+Maciej Zimnoch
+Michael Woolnough
+Nicola Peduzzi
+Olivier Mengué
+oscarzhao
+Paul Bonser
+Peter Schultz
+Rebecca Chin
+Reed Allman
+Richard Wilkes
+Robert Russell
+Runrioter Wung
+Shuode Li
+Soroush Pour
+Stan Putrya
+Stanley Gunawan
+Xiangyu Hu
+Xiaobing Jiang
+Xiuming Chen
+Zhenye Xie
+
+# Organizations
+
+Barracuda Networks, Inc.
+Counting Ltd.
+Google Inc.
+InfoSum Ltd.
+Keybase Inc.
+Percona LLC
+Pivotal Inc.
+Stripe Inc.
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
new file mode 100644
index 0000000..ce1b533
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
@@ -0,0 +1,178 @@
+## Version 1.4.1 (2018-11-14)
+
+Bugfixes:
+
+ - Fix TIME format for binary columns (#818)
+ - Fix handling of empty auth plugin names (#835)
+ - Fix caching_sha2_password with empty password (#826)
+ - Fix canceled context broke mysqlConn (#862)
+ - Fix OldAuthSwitchRequest support (#870)
+ - Fix Auth Response packet for cleartext password (#887)
+
+## Version 1.4 (2018-06-03)
+
+Changes:
+
+ - Documentation fixes (#530, #535, #567)
+ - Refactoring (#575, #579, #580, #581, #603, #615, #704)
+ - Cache column names (#444)
+ - Sort the DSN parameters in DSNs generated from a config (#637)
+ - Allow native password authentication by default (#644)
+ - Use the default port if it is missing in the DSN (#668)
+ - Removed the `strict` mode (#676)
+ - Do not query `max_allowed_packet` by default (#680)
+ - Dropped support Go 1.6 and lower (#696)
+ - Updated `ConvertValue()` to match the database/sql/driver implementation (#760)
+ - Document the usage of `0000-00-00T00:00:00` as the time.Time zero value (#783)
+ - Improved the compatibility of the authentication system (#807)
+
+New Features:
+
+ - Multi-Results support (#537)
+ - `rejectReadOnly` DSN option (#604)
+ - `context.Context` support (#608, #612, #627, #761)
+ - Transaction isolation level support (#619, #744)
+ - Read-Only transactions support (#618, #634)
+ - `NewConfig` function which initializes a config with default values (#679)
+ - Implemented the `ColumnType` interfaces (#667, #724)
+ - Support for custom string types in `ConvertValue` (#623)
+ - Implemented `NamedValueChecker`, improving support for uint64 with high bit set (#690, #709, #710)
+ - `caching_sha2_password` authentication plugin support (#794, #800, #801, #802)
+ - Implemented `driver.SessionResetter` (#779)
+ - `sha256_password` authentication plugin support (#808)
+
+Bugfixes:
+
+ - Use the DSN hostname as TLS default ServerName if `tls=true` (#564, #718)
+ - Fixed LOAD LOCAL DATA INFILE for empty files (#590)
+ - Removed columns definition cache since it sometimes cached invalid data (#592)
+ - Don't mutate registered TLS configs (#600)
+ - Make RegisterTLSConfig concurrency-safe (#613)
+ - Handle missing auth data in the handshake packet correctly (#646)
+ - Do not retry queries when data was written to avoid data corruption (#302, #736)
+ - Cache the connection pointer for error handling before invalidating it (#678)
+ - Fixed imports for appengine/cloudsql (#700)
+ - Fix sending STMT_LONG_DATA for 0 byte data (#734)
+ - Set correct capacity for []bytes read from length-encoded strings (#766)
+ - Make RegisterDial concurrency-safe (#773)
+
+
+## Version 1.3 (2016-12-01)
+
+Changes:
+
+ - Go 1.1 is no longer supported
+ - Use decimals fields in MySQL to format time types (#249)
+ - Buffer optimizations (#269)
+ - TLS ServerName defaults to the host (#283)
+ - Refactoring (#400, #410, #437)
+ - Adjusted documentation for second generation CloudSQL (#485)
+ - Documented DSN system var quoting rules (#502)
+ - Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512)
+
+New Features:
+
+ - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
+ - Support for returning table alias on Columns() (#289, #359, #382)
+ - Placeholder interpolation, can be activated with the DSN parameter `interpolateParams=true` (#309, #318, #490)
+ - Support for uint64 parameters with high bit set (#332, #345)
+ - Cleartext authentication plugin support (#327)
+ - Exported ParseDSN function and the Config struct (#403, #419, #429)
+ - Read / Write timeouts (#401)
+ - Support for JSON field type (#414)
+ - Support for multi-statements and multi-results (#411, #431)
+ - DSN parameter to set the driver-side max_allowed_packet value manually (#489)
+ - Native password authentication plugin support (#494, #524)
+
+Bugfixes:
+
+ - Fixed handling of queries without columns and rows (#255)
+ - Fixed a panic when SetKeepAlive() failed (#298)
+ - Handle ERR packets while reading rows (#321)
+ - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349)
+ - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356)
+ - Actually zero out bytes in handshake response (#378)
+ - Fixed race condition in registering LOAD DATA INFILE handler (#383)
+ - Fixed tests with MySQL 5.7.9+ (#380)
+ - QueryUnescape TLS config names (#397)
+ - Fixed "broken pipe" error by writing to closed socket (#390)
+ - Fixed LOAD LOCAL DATA INFILE buffering (#424)
+ - Fixed parsing of floats into float64 when placeholders are used (#434)
+ - Fixed DSN tests with Go 1.7+ (#459)
+ - Handle ERR packets while waiting for EOF (#473)
+ - Invalidate connection on error while discarding additional results (#513)
+ - Allow terminating packets of length 0 (#516)
+
+
+## Version 1.2 (2014-06-03)
+
+Changes:
+
+ - We switched back to a "rolling release". `go get` installs the current master branch again
+ - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver
+ - Exported errors to allow easy checking from application code
+ - Enabled TCP Keepalives on TCP connections
+ - Optimized INFILE handling (better buffer size calculation, lazy init, ...)
+ - The DSN parser also checks for a missing separating slash
+ - Faster binary date / datetime to string formatting
+ - Also exported the MySQLWarning type
+ - mysqlConn.Close returns the first error encountered instead of ignoring all errors
+ - writePacket() automatically writes the packet size to the header
+ - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets
+
+New Features:
+
+ - `RegisterDial` allows the usage of a custom dial function to establish the network connection
+ - Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter
+ - Logging of critical errors is configurable with `SetLogger`
+ - Google CloudSQL support
+
+Bugfixes:
+
+ - Allow more than 32 parameters in prepared statements
+ - Various old_password fixes
+ - Fixed TestConcurrent test to pass Go's race detection
+ - Fixed appendLengthEncodedInteger for large numbers
+ - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo)
+
+
+## Version 1.1 (2013-11-02)
+
+Changes:
+
+ - Go-MySQL-Driver now requires Go 1.1
+ - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore
+ - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors
+ - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")`
+ - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'.
+ - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries
+ - Optimized the buffer for reading
+ - stmt.Query now caches column metadata
+ - New Logo
+ - Changed the copyright header to include all contributors
+ - Improved the LOAD INFILE documentation
+ - The driver struct is now exported to make the driver directly accessible
+ - Refactored the driver tests
+ - Added more benchmarks and moved all to a separate file
+ - Other small refactoring
+
+New Features:
+
+ - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure
+ - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs
+ - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used
+
+Bugfixes:
+
+ - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification
+ - Convert to DB timezone when inserting `time.Time`
+ - Split packets (more than 16MB) are now merged correctly
+ - Fixed false positive `io.EOF` errors when the data was fully read
+ - Avoid panics on reuse of closed connections
+ - Fixed empty string producing false nil values
+ - Fixed sign byte for positive TIME fields
+
+
+## Version 1.0 (2013-05-14)
+
+Initial Release
diff --git a/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md b/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md
new file mode 100644
index 0000000..8fe16bc
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md
@@ -0,0 +1,23 @@
+# Contributing Guidelines
+
+## Reporting Issues
+
+Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed).
+
+## Contributing Code
+
+By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file.
+Don't forget to add yourself to the AUTHORS file.
+
+### Code Review
+
+Everyone is invited to review and comment on pull requests.
+If it looks fine to you, comment with "LGTM" (Looks good to me).
+
+If changes are required, notice the reviewers with "PTAL" (Please take another look) after committing the fixes.
+
+Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM".
+
+## Development Ideas
+
+If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page.
diff --git a/vendor/github.com/go-sql-driver/mysql/LICENSE b/vendor/github.com/go-sql-driver/mysql/LICENSE
new file mode 100644
index 0000000..14e2f77
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
new file mode 100644
index 0000000..2e9b07e
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/README.md
@@ -0,0 +1,490 @@
+# Go-MySQL-Driver
+
+A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package
+
+![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin")
+
+---------------------------------------
+ * [Features](#features)
+ * [Requirements](#requirements)
+ * [Installation](#installation)
+ * [Usage](#usage)
+ * [DSN (Data Source Name)](#dsn-data-source-name)
+ * [Password](#password)
+ * [Protocol](#protocol)
+ * [Address](#address)
+ * [Parameters](#parameters)
+ * [Examples](#examples)
+ * [Connection pool and timeouts](#connection-pool-and-timeouts)
+ * [context.Context Support](#contextcontext-support)
+ * [ColumnType Support](#columntype-support)
+ * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
+ * [time.Time support](#timetime-support)
+ * [Unicode support](#unicode-support)
+ * [Testing / Development](#testing--development)
+ * [License](#license)
+
+---------------------------------------
+
+## Features
+ * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
+ * Native Go implementation. No C-bindings, just pure Go
+ * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](https://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
+ * Automatic handling of broken connections
+ * Automatic Connection Pooling *(by database/sql package)*
+ * Supports queries larger than 16MB
+ * Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support.
+ * Intelligent `LONG DATA` handling in prepared statements
+ * Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support
+ * Optional `time.Time` parsing
+ * Optional placeholder interpolation
+
+## Requirements
+ * Go 1.7 or higher. We aim to support the 3 latest versions of Go.
+ * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
+
+---------------------------------------
+
+## Installation
+Simply install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
+```bash
+$ go get -u github.com/go-sql-driver/mysql
+```
+Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
+
+## Usage
+_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](https://golang.org/pkg/database/sql/) API then.
+
+Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
+```go
+import "database/sql"
+import _ "github.com/go-sql-driver/mysql"
+
+db, err := sql.Open("mysql", "user:password@/dbname")
+```
+
+[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
+
+
+### DSN (Data Source Name)
+
+The Data Source Name has a common format, like e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php) uses it, but without type-prefix (optional parts marked by squared brackets):
+```
+[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN]
+```
+
+A DSN in its fullest form:
+```
+username:password@protocol(address)/dbname?param=value
+```
+
+Except for the database name, all values are optional. So the minimal DSN is:
+```
+/dbname
+```
+
+If you do not want to preselect a database, leave `dbname` empty:
+```
+/
+```
+This has the same effect as an empty DSN string:
+```
+
+```
+
+Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
+
+#### Password
+Passwords can consist of any character. Escaping is **not** necessary.
+
+#### Protocol
+See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which networks are available.
+In general you should use a Unix domain socket if available and TCP otherwise for best performance.
+
+#### Address
+For TCP and UDP networks, addresses have the form `host[:port]`.
+If `port` is omitted, the default port will be used.
+If `host` is a literal IPv6 address, it must be enclosed in square brackets.
+The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
+
+For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
+
+#### Parameters
+*Parameters are case-sensitive!*
+
+Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`.
+
+##### `allowAllFiles`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files.
+[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
+
+##### `allowCleartextPasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
+
+##### `allowNativePasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: true
+```
+`allowNativePasswords=false` disallows the usage of MySQL native password method.
+
+##### `allowOldPasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords).
+
+##### `charset`
+
+```
+Type: string
+Valid Values:   <name>
+Default:        none
+```
+
+Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset fails. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
+
+Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
+Unless you need the fallback behavior, please use `collation` instead.
+
+##### `collation`
+
+```
+Type: string
+Valid Values:   <name>
+Default: utf8_general_ci
+```
+
+Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail.
+
+A list of valid charsets for a server is retrievable with `SHOW COLLATION`.
+
+##### `clientFoundRows`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed.
+
+##### `columnsWithAlias`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example:
+
+```
+SELECT u.id FROM users as u
+```
+
+will return `u.id` instead of just `id` if `columnsWithAlias=true`.
+
+##### `interpolateParams`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with given parameters. This reduces the number of roundtrips, since the driver has to prepare a statement, execute it with given parameters and close the statement again with `interpolateParams=false`.
+
+*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
+
+##### `loc`
+
+```
+Type: string
+Valid Values:   <escaped name>
+Default: UTC
+```
+
+Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](https://golang.org/pkg/time/#LoadLocation) for details.
+
+Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
+
+Please keep in mind, that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
+
+##### `maxAllowedPacket`
+```
+Type: decimal number
+Default: 4194304
+```
+
+Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*.
+
+##### `multiStatements`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded.
+
+When `multiStatements` is used, `?` parameters must only be used in the first statement.
+
+##### `parseTime`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`
+The date or datetime like `0000-00-00 00:00:00` is converted into the zero value of `time.Time`.
+
+
+##### `readTimeout`
+
+```
+Type: duration
+Default: 0
+```
+
+I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+##### `rejectReadOnly`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+
+`rejectReadOnly=true` causes the driver to reject read-only connections. This
+is for a possible race condition during an automatic failover, where the mysql
+client gets connected to a read-only replica after the failover.
+
+Note that this should be a fairly rare case, as an automatic failover normally
+happens when the primary is down, and the race condition shouldn't happen
+unless it comes back up online as soon as the failover is kicked off. On the
+other hand, when this happens, a MySQL application can get stuck on a
+read-only connection until restarted. It is however fairly easy to reproduce,
+for example, using a manual failover on AWS Aurora's MySQL-compatible cluster.
+
+If you are not relying on read-only transactions to reject writes that aren't
+supposed to happen, setting this on some MySQL providers (such as AWS Aurora)
+is safer for failovers.
+
+Note that ERROR 1290 can be returned for a `read-only` server and this option will
+cause a retry for that error. However the same error number is used for some
+other cases. You should ensure your application will never cause an ERROR 1290
+except for `read-only` mode when enabling this option.
+
+
+##### `serverPubKey`
+
+```
+Type: string
+Valid Values:   <name>
+Default: none
+```
+
+Server public keys can be registered with [`mysql.RegisterServerPubKey`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterServerPubKey), which can then be used by the assigned name in the DSN.
+Public keys are used to transmit encrypted data, e.g. for authentication.
+If the server's public key is known, it should be set manually to avoid expensive and potentially insecure transmissions of the public key from the server to the client each time it is required.
+
+
+##### `timeout`
+
+```
+Type: duration
+Default: OS default
+```
+
+Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+
+##### `tls`
+
+```
+Type: bool / string
+Valid Values:   true, false, skip-verify, <name>
+Default: false
+```
+
+`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
+
+
+##### `writeTimeout`
+
+```
+Type: duration
+Default: 0
+```
+
+I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+
+##### System Variables
+
+Any other parameters are interpreted as system variables:
+ * `<boolean_var>=<value>`: `SET <boolean_var>=<value>`
+ * `<enum_var>=<value>`: `SET <enum_var>=<value>`
+ * `<string_var>=%27<value>%27`: `SET <string_var>='<value>'`
+
+Rules:
+* The values for string variables must be quoted with `'`.
+* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!
+ (which implies values of string variables must be wrapped with `%27`).
+
+Examples:
+ * `autocommit=1`: `SET autocommit=1`
+ * [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'`
+ * [`tx_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `SET tx_isolation='REPEATABLE-READ'`
+
+
+#### Examples
+```
+user@unix(/path/to/socket)/dbname
+```
+
+```
+root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
+```
+
+```
+user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
+```
+
+Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html):
+```
+user:password@/dbname?sql_mode=TRADITIONAL
+```
+
+TCP via IPv6:
+```
+user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci
+```
+
+TCP on a remote host, e.g. Amazon RDS:
+```
+id:password@tcp(your-amazonaws-uri.com:3306)/dbname
+```
+
+Google Cloud SQL on App Engine (First Generation MySQL Server):
+```
+user@cloudsql(project-id:instance-name)/dbname
+```
+
+Google Cloud SQL on App Engine (Second Generation MySQL Server):
+```
+user@cloudsql(project-id:regionname:instance-name)/dbname
+```
+
+TCP using default port (3306) on localhost:
+```
+user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
+```
+
+Use the default protocol (tcp) and host (localhost:3306):
+```
+user:password@/dbname
+```
+
+No Database preselected:
+```
+user:password@/
+```
+
+
+### Connection pool and timeouts
+The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
+
+## `ColumnType` Support
+This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported.
+
+## `context.Context` Support
+Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
+See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
+
+
+### `LOAD DATA LOCAL INFILE` support
+For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
+```go
+import "github.com/go-sql-driver/mysql"
+```
+
+Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the Whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
+
+To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::<name>` then. Choose different names for different handlers and `DeregisterReaderHandler` when you don't need it anymore.
+
+See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
+
+
+### `time.Time` support
+The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
+
+However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
+
+**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
+
+Alternatively you can use the [`NullTime`](https://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
+
+
+### Unicode support
+Since version 1.1 Go-MySQL-Driver automatically uses the collation `utf8_general_ci` by default.
+
+Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
+
+Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default.
+
+See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support.
+
+## Testing / Development
+To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
+
+Go-MySQL-Driver is not feature-complete yet. Your help is very appreciated.
+If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls).
+
+See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details.
+
+---------------------------------------
+
+## License
+Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
+
+Mozilla summarizes the license scope as follows:
+> MPL: The copyleft applies to any files containing MPLed code.
+
+
+That means:
+ * You can **use** the **unchanged** source code both in private and commercially.
+ * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0).
+ * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**.
+
+Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license.
+
+You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE).
+
+![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow")
+
diff --git a/vendor/github.com/go-sql-driver/mysql/appengine.go b/vendor/github.com/go-sql-driver/mysql/appengine.go
new file mode 100644
index 0000000..be41f2e
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/appengine.go
@@ -0,0 +1,19 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build appengine
+
+package mysql
+
+import (
+ "google.golang.org/appengine/cloudsql"
+)
+
+func init() {
+ RegisterDial("cloudsql", cloudsql.Dial)
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/auth.go b/vendor/github.com/go-sql-driver/mysql/auth.go
new file mode 100644
index 0000000..14f678a
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/auth.go
@@ -0,0 +1,420 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/pem"
+ "sync"
+)
+
+// server pub keys registry
+var (
+ serverPubKeyLock sync.RWMutex
+ serverPubKeyRegistry map[string]*rsa.PublicKey
+)
+
+// RegisterServerPubKey registers a server RSA public key which can be used to
+// send data in a secure manner to the server without receiving the public key
+// in a potentially insecure way from the server first.
+// Registered keys can afterwards be used adding serverPubKey=<name> to the DSN.
+//
+// Note: The provided rsa.PublicKey instance is exclusively owned by the driver
+// after registering it and may not be modified.
+//
+// data, err := ioutil.ReadFile("mykey.pem")
+// if err != nil {
+// log.Fatal(err)
+// }
+//
+// block, _ := pem.Decode(data)
+// if block == nil || block.Type != "PUBLIC KEY" {
+// log.Fatal("failed to decode PEM block containing public key")
+// }
+//
+// pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+// if err != nil {
+// log.Fatal(err)
+// }
+//
+// if rsaPubKey, ok := pub.(*rsa.PublicKey); ok {
+// mysql.RegisterServerPubKey("mykey", rsaPubKey)
+// } else {
+// log.Fatal("not a RSA public key")
+// }
+//
+func RegisterServerPubKey(name string, pubKey *rsa.PublicKey) {
+ serverPubKeyLock.Lock()
+ if serverPubKeyRegistry == nil {
+ serverPubKeyRegistry = make(map[string]*rsa.PublicKey)
+ }
+
+ serverPubKeyRegistry[name] = pubKey
+ serverPubKeyLock.Unlock()
+}
+
+// DeregisterServerPubKey removes the public key registered with the given name.
+func DeregisterServerPubKey(name string) {
+ serverPubKeyLock.Lock()
+ if serverPubKeyRegistry != nil {
+ delete(serverPubKeyRegistry, name)
+ }
+ serverPubKeyLock.Unlock()
+}
+
+func getServerPubKey(name string) (pubKey *rsa.PublicKey) {
+ serverPubKeyLock.RLock()
+ if v, ok := serverPubKeyRegistry[name]; ok {
+ pubKey = v
+ }
+ serverPubKeyLock.RUnlock()
+ return
+}
+
+// Hash password using pre 4.1 (old password) method
+// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
+type myRnd struct {
+ seed1, seed2 uint32
+}
+
+const myRndMaxVal = 0x3FFFFFFF
+
+// Pseudo random number generator
+func newMyRnd(seed1, seed2 uint32) *myRnd {
+ return &myRnd{
+ seed1: seed1 % myRndMaxVal,
+ seed2: seed2 % myRndMaxVal,
+ }
+}
+
+// Tested to be equivalent to MariaDB's floating point variant
+// http://play.golang.org/p/QHvhd4qved
+// http://play.golang.org/p/RG0q4ElWDx
+func (r *myRnd) NextByte() byte {
+ r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
+ r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
+
+ return byte(uint64(r.seed1) * 31 / myRndMaxVal)
+}
+
+// Generate binary hash from byte string using insecure pre 4.1 method
+func pwHash(password []byte) (result [2]uint32) {
+ var add uint32 = 7
+ var tmp uint32
+
+ result[0] = 1345345333
+ result[1] = 0x12345671
+
+ for _, c := range password {
+ // skip spaces and tabs in password
+ if c == ' ' || c == '\t' {
+ continue
+ }
+
+ tmp = uint32(c)
+ result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
+ result[1] += (result[1] << 8) ^ result[0]
+ add += tmp
+ }
+
+ // Remove sign bit (1<<31)-1)
+ result[0] &= 0x7FFFFFFF
+ result[1] &= 0x7FFFFFFF
+
+ return
+}
+
+// Hash password using insecure pre 4.1 method
+func scrambleOldPassword(scramble []byte, password string) []byte {
+ if len(password) == 0 {
+ return nil
+ }
+
+ scramble = scramble[:8]
+
+ hashPw := pwHash([]byte(password))
+ hashSc := pwHash(scramble)
+
+ r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
+
+ var out [8]byte
+ for i := range out {
+ out[i] = r.NextByte() + 64
+ }
+
+ mask := r.NextByte()
+ for i := range out {
+ out[i] ^= mask
+ }
+
+ return out[:]
+}
+
+// Hash password using 4.1+ method (SHA1)
+func scramblePassword(scramble []byte, password string) []byte {
+ if len(password) == 0 {
+ return nil
+ }
+
+ // stage1Hash = SHA1(password)
+ crypt := sha1.New()
+ crypt.Write([]byte(password))
+ stage1 := crypt.Sum(nil)
+
+ // scrambleHash = SHA1(scramble + SHA1(stage1Hash))
+ // inner Hash
+ crypt.Reset()
+ crypt.Write(stage1)
+ hash := crypt.Sum(nil)
+
+ // outer Hash
+ crypt.Reset()
+ crypt.Write(scramble)
+ crypt.Write(hash)
+ scramble = crypt.Sum(nil)
+
+ // token = scrambleHash XOR stage1Hash
+ for i := range scramble {
+ scramble[i] ^= stage1[i]
+ }
+ return scramble
+}
+
+// Hash password using MySQL 8+ method (SHA256)
+func scrambleSHA256Password(scramble []byte, password string) []byte {
+ if len(password) == 0 {
+ return nil
+ }
+
+ // XOR(SHA256(password), SHA256(SHA256(SHA256(password)), scramble))
+
+ crypt := sha256.New()
+ crypt.Write([]byte(password))
+ message1 := crypt.Sum(nil)
+
+ crypt.Reset()
+ crypt.Write(message1)
+ message1Hash := crypt.Sum(nil)
+
+ crypt.Reset()
+ crypt.Write(message1Hash)
+ crypt.Write(scramble)
+ message2 := crypt.Sum(nil)
+
+ for i := range message1 {
+ message1[i] ^= message2[i]
+ }
+
+ return message1
+}
+
+func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte, error) {
+ plain := make([]byte, len(password)+1)
+ copy(plain, password)
+ for i := range plain {
+ j := i % len(seed)
+ plain[i] ^= seed[j]
+ }
+ sha1 := sha1.New()
+ return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil)
+}
+
+func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error {
+ enc, err := encryptPassword(mc.cfg.Passwd, seed, pub)
+ if err != nil {
+ return err
+ }
+ return mc.writeAuthSwitchPacket(enc)
+}
+
+func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) {
+ switch plugin {
+ case "caching_sha2_password":
+ authResp := scrambleSHA256Password(authData, mc.cfg.Passwd)
+ return authResp, nil
+
+ case "mysql_old_password":
+ if !mc.cfg.AllowOldPasswords {
+ return nil, ErrOldPassword
+ }
+ // Note: there are edge cases where this should work but doesn't;
+ // this is currently "wontfix":
+ // https://github.com/go-sql-driver/mysql/issues/184
+ authResp := append(scrambleOldPassword(authData[:8], mc.cfg.Passwd), 0)
+ return authResp, nil
+
+ case "mysql_clear_password":
+ if !mc.cfg.AllowCleartextPasswords {
+ return nil, ErrCleartextPassword
+ }
+ // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
+ // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
+ return append([]byte(mc.cfg.Passwd), 0), nil
+
+ case "mysql_native_password":
+ if !mc.cfg.AllowNativePasswords {
+ return nil, ErrNativePassword
+ }
+ // https://dev.mysql.com/doc/internals/en/secure-password-authentication.html
+ // Native password authentication only need and will need 20-byte challenge.
+ authResp := scramblePassword(authData[:20], mc.cfg.Passwd)
+ return authResp, nil
+
+ case "sha256_password":
+ if len(mc.cfg.Passwd) == 0 {
+ return []byte{0}, nil
+ }
+ if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
+ // write cleartext auth packet
+ return append([]byte(mc.cfg.Passwd), 0), nil
+ }
+
+ pubKey := mc.cfg.pubKey
+ if pubKey == nil {
+ // request public key from server
+ return []byte{1}, nil
+ }
+
+ // encrypted password
+ enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey)
+ return enc, err
+
+ default:
+ errLog.Print("unknown auth plugin:", plugin)
+ return nil, ErrUnknownPlugin
+ }
+}
+
+func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
+ // Read Result Packet
+ authData, newPlugin, err := mc.readAuthResult()
+ if err != nil {
+ return err
+ }
+
+ // handle auth plugin switch, if requested
+ if newPlugin != "" {
+ // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is
+ // sent and we have to keep using the cipher sent in the init packet.
+ if authData == nil {
+ authData = oldAuthData
+ } else {
+ // copy data from read buffer to owned slice
+ copy(oldAuthData, authData)
+ }
+
+ plugin = newPlugin
+
+ authResp, err := mc.auth(authData, plugin)
+ if err != nil {
+ return err
+ }
+ if err = mc.writeAuthSwitchPacket(authResp); err != nil {
+ return err
+ }
+
+ // Read Result Packet
+ authData, newPlugin, err = mc.readAuthResult()
+ if err != nil {
+ return err
+ }
+
+ // Do not allow to change the auth plugin more than once
+ if newPlugin != "" {
+ return ErrMalformPkt
+ }
+ }
+
+ switch plugin {
+
+ // https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/
+ case "caching_sha2_password":
+ switch len(authData) {
+ case 0:
+ return nil // auth successful
+ case 1:
+ switch authData[0] {
+ case cachingSha2PasswordFastAuthSuccess:
+ if err = mc.readResultOK(); err == nil {
+ return nil // auth successful
+ }
+
+ case cachingSha2PasswordPerformFullAuthentication:
+ if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
+ // write cleartext auth packet
+ err = mc.writeAuthSwitchPacket(append([]byte(mc.cfg.Passwd), 0))
+ if err != nil {
+ return err
+ }
+ } else {
+ pubKey := mc.cfg.pubKey
+ if pubKey == nil {
+ // request public key from server
+ data := mc.buf.takeSmallBuffer(4 + 1)
+ data[4] = cachingSha2PasswordRequestPublicKey
+ mc.writePacket(data)
+
+ // parse public key
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ block, _ := pem.Decode(data[1:])
+ pkix, err := x509.ParsePKIXPublicKey(block.Bytes)
+ if err != nil {
+ return err
+ }
+ pubKey = pkix.(*rsa.PublicKey)
+ }
+
+ // send encrypted password
+ err = mc.sendEncryptedPassword(oldAuthData, pubKey)
+ if err != nil {
+ return err
+ }
+ }
+ return mc.readResultOK()
+
+ default:
+ return ErrMalformPkt
+ }
+ default:
+ return ErrMalformPkt
+ }
+
+ case "sha256_password":
+ switch len(authData) {
+ case 0:
+ return nil // auth successful
+ default:
+ block, _ := pem.Decode(authData)
+ pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+ if err != nil {
+ return err
+ }
+
+ // send encrypted password
+ err = mc.sendEncryptedPassword(oldAuthData, pub.(*rsa.PublicKey))
+ if err != nil {
+ return err
+ }
+ return mc.readResultOK()
+ }
+
+ default:
+ return nil // auth successful
+ }
+
+ return err
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go
new file mode 100644
index 0000000..eb4748b
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/buffer.go
@@ -0,0 +1,147 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "io"
+ "net"
+ "time"
+)
+
+const defaultBufSize = 4096
+
+// A buffer which is used for both reading and writing.
+// This is possible since communication on each connection is synchronous.
+// In other words, we can't write and read simultaneously on the same connection.
+// The buffer is similar to bufio.Reader / Writer but zero-copy-ish
+// Also highly optimized for this particular use case.
+type buffer struct {
+ buf []byte
+ nc net.Conn
+ idx int
+ length int
+ timeout time.Duration
+}
+
+func newBuffer(nc net.Conn) buffer {
+ var b [defaultBufSize]byte
+ return buffer{
+ buf: b[:],
+ nc: nc,
+ }
+}
+
+// fill reads into the buffer until at least _need_ bytes are in it
+func (b *buffer) fill(need int) error {
+ n := b.length
+
+ // move existing data to the beginning
+ if n > 0 && b.idx > 0 {
+ copy(b.buf[0:n], b.buf[b.idx:])
+ }
+
+ // grow buffer if necessary
+ // TODO: let the buffer shrink again at some point
+ // Maybe keep the org buf slice and swap back?
+ if need > len(b.buf) {
+ // Round up to the next multiple of the default size
+ newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
+ copy(newBuf, b.buf)
+ b.buf = newBuf
+ }
+
+ b.idx = 0
+
+ for {
+ if b.timeout > 0 {
+ if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil {
+ return err
+ }
+ }
+
+ nn, err := b.nc.Read(b.buf[n:])
+ n += nn
+
+ switch err {
+ case nil:
+ if n < need {
+ continue
+ }
+ b.length = n
+ return nil
+
+ case io.EOF:
+ if n >= need {
+ b.length = n
+ return nil
+ }
+ return io.ErrUnexpectedEOF
+
+ default:
+ return err
+ }
+ }
+}
+
+// returns next N bytes from buffer.
+// The returned slice is only guaranteed to be valid until the next read
+func (b *buffer) readNext(need int) ([]byte, error) {
+ if b.length < need {
+ // refill
+ if err := b.fill(need); err != nil {
+ return nil, err
+ }
+ }
+
+ offset := b.idx
+ b.idx += need
+ b.length -= need
+ return b.buf[offset:b.idx], nil
+}
+
+// returns a buffer with the requested size.
+// If possible, a slice from the existing buffer is returned.
+// Otherwise a bigger buffer is made.
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeBuffer(length int) []byte {
+ if b.length > 0 {
+ return nil
+ }
+
+ // test (cheap) general case first
+ if length <= defaultBufSize || length <= cap(b.buf) {
+ return b.buf[:length]
+ }
+
+ if length < maxPacketSize {
+ b.buf = make([]byte, length)
+ return b.buf
+ }
+ return make([]byte, length)
+}
+
+// shortcut which can be used if the requested buffer is guaranteed to be
+// smaller than defaultBufSize
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeSmallBuffer(length int) []byte {
+ if b.length > 0 {
+ return nil
+ }
+ return b.buf[:length]
+}
+
+// takeCompleteBuffer returns the complete existing buffer.
+// This can be used if the necessary buffer size is unknown.
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeCompleteBuffer() []byte {
+ if b.length > 0 {
+ return nil
+ }
+ return b.buf
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go
new file mode 100644
index 0000000..136c9e4
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/collations.go
@@ -0,0 +1,251 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+const defaultCollation = "utf8_general_ci"
+const binaryCollation = "binary"
+
+// A list of available collations mapped to the internal ID.
+// To update this map use the following MySQL query:
+// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS
+var collations = map[string]byte{
+ "big5_chinese_ci": 1,
+ "latin2_czech_cs": 2,
+ "dec8_swedish_ci": 3,
+ "cp850_general_ci": 4,
+ "latin1_german1_ci": 5,
+ "hp8_english_ci": 6,
+ "koi8r_general_ci": 7,
+ "latin1_swedish_ci": 8,
+ "latin2_general_ci": 9,
+ "swe7_swedish_ci": 10,
+ "ascii_general_ci": 11,
+ "ujis_japanese_ci": 12,
+ "sjis_japanese_ci": 13,
+ "cp1251_bulgarian_ci": 14,
+ "latin1_danish_ci": 15,
+ "hebrew_general_ci": 16,
+ "tis620_thai_ci": 18,
+ "euckr_korean_ci": 19,
+ "latin7_estonian_cs": 20,
+ "latin2_hungarian_ci": 21,
+ "koi8u_general_ci": 22,
+ "cp1251_ukrainian_ci": 23,
+ "gb2312_chinese_ci": 24,
+ "greek_general_ci": 25,
+ "cp1250_general_ci": 26,
+ "latin2_croatian_ci": 27,
+ "gbk_chinese_ci": 28,
+ "cp1257_lithuanian_ci": 29,
+ "latin5_turkish_ci": 30,
+ "latin1_german2_ci": 31,
+ "armscii8_general_ci": 32,
+ "utf8_general_ci": 33,
+ "cp1250_czech_cs": 34,
+ "ucs2_general_ci": 35,
+ "cp866_general_ci": 36,
+ "keybcs2_general_ci": 37,
+ "macce_general_ci": 38,
+ "macroman_general_ci": 39,
+ "cp852_general_ci": 40,
+ "latin7_general_ci": 41,
+ "latin7_general_cs": 42,
+ "macce_bin": 43,
+ "cp1250_croatian_ci": 44,
+ "utf8mb4_general_ci": 45,
+ "utf8mb4_bin": 46,
+ "latin1_bin": 47,
+ "latin1_general_ci": 48,
+ "latin1_general_cs": 49,
+ "cp1251_bin": 50,
+ "cp1251_general_ci": 51,
+ "cp1251_general_cs": 52,
+ "macroman_bin": 53,
+ "utf16_general_ci": 54,
+ "utf16_bin": 55,
+ "utf16le_general_ci": 56,
+ "cp1256_general_ci": 57,
+ "cp1257_bin": 58,
+ "cp1257_general_ci": 59,
+ "utf32_general_ci": 60,
+ "utf32_bin": 61,
+ "utf16le_bin": 62,
+ "binary": 63,
+ "armscii8_bin": 64,
+ "ascii_bin": 65,
+ "cp1250_bin": 66,
+ "cp1256_bin": 67,
+ "cp866_bin": 68,
+ "dec8_bin": 69,
+ "greek_bin": 70,
+ "hebrew_bin": 71,
+ "hp8_bin": 72,
+ "keybcs2_bin": 73,
+ "koi8r_bin": 74,
+ "koi8u_bin": 75,
+ "latin2_bin": 77,
+ "latin5_bin": 78,
+ "latin7_bin": 79,
+ "cp850_bin": 80,
+ "cp852_bin": 81,
+ "swe7_bin": 82,
+ "utf8_bin": 83,
+ "big5_bin": 84,
+ "euckr_bin": 85,
+ "gb2312_bin": 86,
+ "gbk_bin": 87,
+ "sjis_bin": 88,
+ "tis620_bin": 89,
+ "ucs2_bin": 90,
+ "ujis_bin": 91,
+ "geostd8_general_ci": 92,
+ "geostd8_bin": 93,
+ "latin1_spanish_ci": 94,
+ "cp932_japanese_ci": 95,
+ "cp932_bin": 96,
+ "eucjpms_japanese_ci": 97,
+ "eucjpms_bin": 98,
+ "cp1250_polish_ci": 99,
+ "utf16_unicode_ci": 101,
+ "utf16_icelandic_ci": 102,
+ "utf16_latvian_ci": 103,
+ "utf16_romanian_ci": 104,
+ "utf16_slovenian_ci": 105,
+ "utf16_polish_ci": 106,
+ "utf16_estonian_ci": 107,
+ "utf16_spanish_ci": 108,
+ "utf16_swedish_ci": 109,
+ "utf16_turkish_ci": 110,
+ "utf16_czech_ci": 111,
+ "utf16_danish_ci": 112,
+ "utf16_lithuanian_ci": 113,
+ "utf16_slovak_ci": 114,
+ "utf16_spanish2_ci": 115,
+ "utf16_roman_ci": 116,
+ "utf16_persian_ci": 117,
+ "utf16_esperanto_ci": 118,
+ "utf16_hungarian_ci": 119,
+ "utf16_sinhala_ci": 120,
+ "utf16_german2_ci": 121,
+ "utf16_croatian_ci": 122,
+ "utf16_unicode_520_ci": 123,
+ "utf16_vietnamese_ci": 124,
+ "ucs2_unicode_ci": 128,
+ "ucs2_icelandic_ci": 129,
+ "ucs2_latvian_ci": 130,
+ "ucs2_romanian_ci": 131,
+ "ucs2_slovenian_ci": 132,
+ "ucs2_polish_ci": 133,
+ "ucs2_estonian_ci": 134,
+ "ucs2_spanish_ci": 135,
+ "ucs2_swedish_ci": 136,
+ "ucs2_turkish_ci": 137,
+ "ucs2_czech_ci": 138,
+ "ucs2_danish_ci": 139,
+ "ucs2_lithuanian_ci": 140,
+ "ucs2_slovak_ci": 141,
+ "ucs2_spanish2_ci": 142,
+ "ucs2_roman_ci": 143,
+ "ucs2_persian_ci": 144,
+ "ucs2_esperanto_ci": 145,
+ "ucs2_hungarian_ci": 146,
+ "ucs2_sinhala_ci": 147,
+ "ucs2_german2_ci": 148,
+ "ucs2_croatian_ci": 149,
+ "ucs2_unicode_520_ci": 150,
+ "ucs2_vietnamese_ci": 151,
+ "ucs2_general_mysql500_ci": 159,
+ "utf32_unicode_ci": 160,
+ "utf32_icelandic_ci": 161,
+ "utf32_latvian_ci": 162,
+ "utf32_romanian_ci": 163,
+ "utf32_slovenian_ci": 164,
+ "utf32_polish_ci": 165,
+ "utf32_estonian_ci": 166,
+ "utf32_spanish_ci": 167,
+ "utf32_swedish_ci": 168,
+ "utf32_turkish_ci": 169,
+ "utf32_czech_ci": 170,
+ "utf32_danish_ci": 171,
+ "utf32_lithuanian_ci": 172,
+ "utf32_slovak_ci": 173,
+ "utf32_spanish2_ci": 174,
+ "utf32_roman_ci": 175,
+ "utf32_persian_ci": 176,
+ "utf32_esperanto_ci": 177,
+ "utf32_hungarian_ci": 178,
+ "utf32_sinhala_ci": 179,
+ "utf32_german2_ci": 180,
+ "utf32_croatian_ci": 181,
+ "utf32_unicode_520_ci": 182,
+ "utf32_vietnamese_ci": 183,
+ "utf8_unicode_ci": 192,
+ "utf8_icelandic_ci": 193,
+ "utf8_latvian_ci": 194,
+ "utf8_romanian_ci": 195,
+ "utf8_slovenian_ci": 196,
+ "utf8_polish_ci": 197,
+ "utf8_estonian_ci": 198,
+ "utf8_spanish_ci": 199,
+ "utf8_swedish_ci": 200,
+ "utf8_turkish_ci": 201,
+ "utf8_czech_ci": 202,
+ "utf8_danish_ci": 203,
+ "utf8_lithuanian_ci": 204,
+ "utf8_slovak_ci": 205,
+ "utf8_spanish2_ci": 206,
+ "utf8_roman_ci": 207,
+ "utf8_persian_ci": 208,
+ "utf8_esperanto_ci": 209,
+ "utf8_hungarian_ci": 210,
+ "utf8_sinhala_ci": 211,
+ "utf8_german2_ci": 212,
+ "utf8_croatian_ci": 213,
+ "utf8_unicode_520_ci": 214,
+ "utf8_vietnamese_ci": 215,
+ "utf8_general_mysql500_ci": 223,
+ "utf8mb4_unicode_ci": 224,
+ "utf8mb4_icelandic_ci": 225,
+ "utf8mb4_latvian_ci": 226,
+ "utf8mb4_romanian_ci": 227,
+ "utf8mb4_slovenian_ci": 228,
+ "utf8mb4_polish_ci": 229,
+ "utf8mb4_estonian_ci": 230,
+ "utf8mb4_spanish_ci": 231,
+ "utf8mb4_swedish_ci": 232,
+ "utf8mb4_turkish_ci": 233,
+ "utf8mb4_czech_ci": 234,
+ "utf8mb4_danish_ci": 235,
+ "utf8mb4_lithuanian_ci": 236,
+ "utf8mb4_slovak_ci": 237,
+ "utf8mb4_spanish2_ci": 238,
+ "utf8mb4_roman_ci": 239,
+ "utf8mb4_persian_ci": 240,
+ "utf8mb4_esperanto_ci": 241,
+ "utf8mb4_hungarian_ci": 242,
+ "utf8mb4_sinhala_ci": 243,
+ "utf8mb4_german2_ci": 244,
+ "utf8mb4_croatian_ci": 245,
+ "utf8mb4_unicode_520_ci": 246,
+ "utf8mb4_vietnamese_ci": 247,
+}
+
+// A blacklist of collations which is unsafe to interpolate parameters.
+// These multibyte encodings may contains 0x5c (`\`) in their trailing bytes.
+var unsafeCollations = map[string]bool{
+ "big5_chinese_ci": true,
+ "sjis_japanese_ci": true,
+ "gbk_chinese_ci": true,
+ "big5_bin": true,
+ "gb2312_bin": true,
+ "gbk_bin": true,
+ "sjis_bin": true,
+ "cp932_japanese_ci": true,
+ "cp932_bin": true,
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go
new file mode 100644
index 0000000..e570614
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/connection.go
@@ -0,0 +1,461 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "io"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// a copy of context.Context for Go 1.7 and earlier
+type mysqlContext interface {
+ Done() <-chan struct{}
+ Err() error
+
+ // defined in context.Context, but not used in this driver:
+ // Deadline() (deadline time.Time, ok bool)
+ // Value(key interface{}) interface{}
+}
+
+type mysqlConn struct {
+ buf buffer
+ netConn net.Conn
+ affectedRows uint64
+ insertId uint64
+ cfg *Config
+ maxAllowedPacket int
+ maxWriteSize int
+ writeTimeout time.Duration
+ flags clientFlag
+ status statusFlag
+ sequence uint8
+ parseTime bool
+
+ // for context support (Go 1.8+)
+ watching bool
+ watcher chan<- mysqlContext
+ closech chan struct{}
+ finished chan<- struct{}
+ canceled atomicError // set non-nil if conn is canceled
+ closed atomicBool // set when conn is closed, before closech is closed
+}
+
+// Handles parameters set in DSN after the connection is established
+func (mc *mysqlConn) handleParams() (err error) {
+ for param, val := range mc.cfg.Params {
+ switch param {
+ // Charset
+ case "charset":
+ charsets := strings.Split(val, ",")
+ for i := range charsets {
+ // ignore errors here - a charset may not exist
+ err = mc.exec("SET NAMES " + charsets[i])
+ if err == nil {
+ break
+ }
+ }
+ if err != nil {
+ return
+ }
+
+ // System Vars
+ default:
+ err = mc.exec("SET " + param + "=" + val + "")
+ if err != nil {
+ return
+ }
+ }
+ }
+
+ return
+}
+
+func (mc *mysqlConn) markBadConn(err error) error {
+ if mc == nil {
+ return err
+ }
+ if err != errBadConnNoWrite {
+ return err
+ }
+ return driver.ErrBadConn
+}
+
+func (mc *mysqlConn) Begin() (driver.Tx, error) {
+ return mc.begin(false)
+}
+
+func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ var q string
+ if readOnly {
+ q = "START TRANSACTION READ ONLY"
+ } else {
+ q = "START TRANSACTION"
+ }
+ err := mc.exec(q)
+ if err == nil {
+ return &mysqlTx{mc}, err
+ }
+ return nil, mc.markBadConn(err)
+}
+
+func (mc *mysqlConn) Close() (err error) {
+ // Makes Close idempotent
+ if !mc.closed.IsSet() {
+ err = mc.writeCommandPacket(comQuit)
+ }
+
+ mc.cleanup()
+
+ return
+}
+
+// Closes the network connection and unsets internal variables. Do not call this
+// function after successful authentication, call Close instead. This function
+// is called before auth or on auth failure because MySQL will have already
+// closed the network connection.
+func (mc *mysqlConn) cleanup() {
+ if !mc.closed.TrySet(true) {
+ return
+ }
+
+ // Makes cleanup idempotent
+ close(mc.closech)
+ if mc.netConn == nil {
+ return
+ }
+ if err := mc.netConn.Close(); err != nil {
+ errLog.Print(err)
+ }
+}
+
+func (mc *mysqlConn) error() error {
+ if mc.closed.IsSet() {
+ if err := mc.canceled.Value(); err != nil {
+ return err
+ }
+ return ErrInvalidConn
+ }
+ return nil
+}
+
+func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ // Send command
+ err := mc.writeCommandPacketStr(comStmtPrepare, query)
+ if err != nil {
+ return nil, mc.markBadConn(err)
+ }
+
+ stmt := &mysqlStmt{
+ mc: mc,
+ }
+
+ // Read Result
+ columnCount, err := stmt.readPrepareResultPacket()
+ if err == nil {
+ if stmt.paramCount > 0 {
+ if err = mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+ }
+
+ if columnCount > 0 {
+ err = mc.readUntilEOF()
+ }
+ }
+
+ return stmt, err
+}
+
+func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) {
+ // Number of ? should be same to len(args)
+ if strings.Count(query, "?") != len(args) {
+ return "", driver.ErrSkip
+ }
+
+ buf := mc.buf.takeCompleteBuffer()
+ if buf == nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return "", ErrInvalidConn
+ }
+ buf = buf[:0]
+ argPos := 0
+
+ for i := 0; i < len(query); i++ {
+ q := strings.IndexByte(query[i:], '?')
+ if q == -1 {
+ buf = append(buf, query[i:]...)
+ break
+ }
+ buf = append(buf, query[i:i+q]...)
+ i += q
+
+ arg := args[argPos]
+ argPos++
+
+ if arg == nil {
+ buf = append(buf, "NULL"...)
+ continue
+ }
+
+ switch v := arg.(type) {
+ case int64:
+ buf = strconv.AppendInt(buf, v, 10)
+ case float64:
+ buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
+ case bool:
+ if v {
+ buf = append(buf, '1')
+ } else {
+ buf = append(buf, '0')
+ }
+ case time.Time:
+ if v.IsZero() {
+ buf = append(buf, "'0000-00-00'"...)
+ } else {
+ v := v.In(mc.cfg.Loc)
+ v = v.Add(time.Nanosecond * 500) // To round under microsecond
+ year := v.Year()
+ year100 := year / 100
+ year1 := year % 100
+ month := v.Month()
+ day := v.Day()
+ hour := v.Hour()
+ minute := v.Minute()
+ second := v.Second()
+ micro := v.Nanosecond() / 1000
+
+ buf = append(buf, []byte{
+ '\'',
+ digits10[year100], digits01[year100],
+ digits10[year1], digits01[year1],
+ '-',
+ digits10[month], digits01[month],
+ '-',
+ digits10[day], digits01[day],
+ ' ',
+ digits10[hour], digits01[hour],
+ ':',
+ digits10[minute], digits01[minute],
+ ':',
+ digits10[second], digits01[second],
+ }...)
+
+ if micro != 0 {
+ micro10000 := micro / 10000
+ micro100 := micro / 100 % 100
+ micro1 := micro % 100
+ buf = append(buf, []byte{
+ '.',
+ digits10[micro10000], digits01[micro10000],
+ digits10[micro100], digits01[micro100],
+ digits10[micro1], digits01[micro1],
+ }...)
+ }
+ buf = append(buf, '\'')
+ }
+ case []byte:
+ if v == nil {
+ buf = append(buf, "NULL"...)
+ } else {
+ buf = append(buf, "_binary'"...)
+ if mc.status&statusNoBackslashEscapes == 0 {
+ buf = escapeBytesBackslash(buf, v)
+ } else {
+ buf = escapeBytesQuotes(buf, v)
+ }
+ buf = append(buf, '\'')
+ }
+ case string:
+ buf = append(buf, '\'')
+ if mc.status&statusNoBackslashEscapes == 0 {
+ buf = escapeStringBackslash(buf, v)
+ } else {
+ buf = escapeStringQuotes(buf, v)
+ }
+ buf = append(buf, '\'')
+ default:
+ return "", driver.ErrSkip
+ }
+
+ if len(buf)+4 > mc.maxAllowedPacket {
+ return "", driver.ErrSkip
+ }
+ }
+ if argPos != len(args) {
+ return "", driver.ErrSkip
+ }
+ return string(buf), nil
+}
+
+func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ if len(args) != 0 {
+ if !mc.cfg.InterpolateParams {
+ return nil, driver.ErrSkip
+ }
+ // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement
+ prepared, err := mc.interpolateParams(query, args)
+ if err != nil {
+ return nil, err
+ }
+ query = prepared
+ }
+ mc.affectedRows = 0
+ mc.insertId = 0
+
+ err := mc.exec(query)
+ if err == nil {
+ return &mysqlResult{
+ affectedRows: int64(mc.affectedRows),
+ insertId: int64(mc.insertId),
+ }, err
+ }
+ return nil, mc.markBadConn(err)
+}
+
+// Internal function to execute commands
+func (mc *mysqlConn) exec(query string) error {
+ // Send command
+ if err := mc.writeCommandPacketStr(comQuery, query); err != nil {
+ return mc.markBadConn(err)
+ }
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return err
+ }
+
+ if resLen > 0 {
+ // columns
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+
+ // rows
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+ }
+
+ return mc.discardResults()
+}
+
+func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
+ return mc.query(query, args)
+}
+
+func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ if len(args) != 0 {
+ if !mc.cfg.InterpolateParams {
+ return nil, driver.ErrSkip
+ }
+ // try client-side prepare to reduce roundtrip
+ prepared, err := mc.interpolateParams(query, args)
+ if err != nil {
+ return nil, err
+ }
+ query = prepared
+ }
+ // Send command
+ err := mc.writeCommandPacketStr(comQuery, query)
+ if err == nil {
+ // Read Result
+ var resLen int
+ resLen, err = mc.readResultSetHeaderPacket()
+ if err == nil {
+ rows := new(textRows)
+ rows.mc = mc
+
+ if resLen == 0 {
+ rows.rs.done = true
+
+ switch err := rows.NextResultSet(); err {
+ case nil, io.EOF:
+ return rows, nil
+ default:
+ return nil, err
+ }
+ }
+
+ // Columns
+ rows.rs.columns, err = mc.readColumns(resLen)
+ return rows, err
+ }
+ }
+ return nil, mc.markBadConn(err)
+}
+
+// Gets the value of the given MySQL System Variable
+// The returned byte slice is only valid until the next read
+func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
+ // Send command
+ if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil {
+ return nil, err
+ }
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err == nil {
+ rows := new(textRows)
+ rows.mc = mc
+ rows.rs.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
+
+ if resLen > 0 {
+ // Columns
+ if err := mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+ }
+
+ dest := make([]driver.Value, resLen)
+ if err = rows.readRow(dest); err == nil {
+ return dest[0].([]byte), mc.readUntilEOF()
+ }
+ }
+ return nil, err
+}
+
+// cancel is called when the query is canceled.
+func (mc *mysqlConn) cancel(err error) {
+ mc.canceled.Set(err)
+ mc.cleanup()
+}
+
+// finish is called when the query has succeeded.
+func (mc *mysqlConn) finish() {
+ if !mc.watching || mc.finished == nil {
+ return
+ }
+ select {
+ case mc.finished <- struct{}{}:
+ mc.watching = false
+ case <-mc.closech:
+ }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/connection_go18.go b/vendor/github.com/go-sql-driver/mysql/connection_go18.go
new file mode 100644
index 0000000..ce52c7d
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/connection_go18.go
@@ -0,0 +1,207 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.8
+
+package mysql
+
+import (
+ "context"
+ "database/sql"
+ "database/sql/driver"
+)
+
+// Ping implements driver.Pinger interface
+func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return driver.ErrBadConn
+ }
+
+ if err = mc.watchCancel(ctx); err != nil {
+ return
+ }
+ defer mc.finish()
+
+ if err = mc.writeCommandPacket(comPing); err != nil {
+ return
+ }
+
+ return mc.readResultOK()
+}
+
+// BeginTx implements driver.ConnBeginTx interface
+func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer mc.finish()
+
+ if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault {
+ level, err := mapIsolationLevel(opts.Isolation)
+ if err != nil {
+ return nil, err
+ }
+ err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return mc.begin(opts.ReadOnly)
+}
+
+func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ rows, err := mc.query(query, dargs)
+ if err != nil {
+ mc.finish()
+ return nil, err
+ }
+ rows.finish = mc.finish
+ return rows, err
+}
+
+func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer mc.finish()
+
+ return mc.Exec(query, dargs)
+}
+
+func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ stmt, err := mc.Prepare(query)
+ mc.finish()
+ if err != nil {
+ return nil, err
+ }
+
+ select {
+ default:
+ case <-ctx.Done():
+ stmt.Close()
+ return nil, ctx.Err()
+ }
+ return stmt, nil
+}
+
+func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := stmt.mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ rows, err := stmt.query(dargs)
+ if err != nil {
+ stmt.mc.finish()
+ return nil, err
+ }
+ rows.finish = stmt.mc.finish
+ return rows, err
+}
+
+func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := stmt.mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer stmt.mc.finish()
+
+ return stmt.Exec(dargs)
+}
+
+func (mc *mysqlConn) watchCancel(ctx context.Context) error {
+ if mc.watching {
+ // Reach here if canceled,
+ // so the connection is already invalid
+ mc.cleanup()
+ return nil
+ }
+	// When ctx is already canceled, don't watch it.
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ // When ctx is not cancellable, don't watch it.
+ if ctx.Done() == nil {
+ return nil
+ }
+ // When watcher is not alive, can't watch it.
+ if mc.watcher == nil {
+ return nil
+ }
+
+ mc.watching = true
+ mc.watcher <- ctx
+ return nil
+}
+
+func (mc *mysqlConn) startWatcher() {
+ watcher := make(chan mysqlContext, 1)
+ mc.watcher = watcher
+ finished := make(chan struct{})
+ mc.finished = finished
+ go func() {
+ for {
+ var ctx mysqlContext
+ select {
+ case ctx = <-watcher:
+ case <-mc.closech:
+ return
+ }
+
+ select {
+ case <-ctx.Done():
+ mc.cancel(ctx.Err())
+ case <-finished:
+ case <-mc.closech:
+ return
+ }
+ }
+ }()
+}
+
+func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) {
+ nv.Value, err = converter{}.ConvertValue(nv.Value)
+ return
+}
+
+// ResetSession implements driver.SessionResetter.
+// (From Go 1.10)
+func (mc *mysqlConn) ResetSession(ctx context.Context) error {
+ if mc.closed.IsSet() {
+ return driver.ErrBadConn
+ }
+ return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go
new file mode 100644
index 0000000..b1e6b85
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/const.go
@@ -0,0 +1,174 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+const (
+ defaultAuthPlugin = "mysql_native_password"
+ defaultMaxAllowedPacket = 4 << 20 // 4 MiB
+ minProtocolVersion = 10
+ maxPacketSize = 1<<24 - 1
+ timeFormat = "2006-01-02 15:04:05.999999"
+)
+
+// MySQL constants documentation:
+// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
+
+const (
+ iOK byte = 0x00
+ iAuthMoreData byte = 0x01
+ iLocalInFile byte = 0xfb
+ iEOF byte = 0xfe
+ iERR byte = 0xff
+)
+
+// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags
+type clientFlag uint32
+
+const (
+ clientLongPassword clientFlag = 1 << iota
+ clientFoundRows
+ clientLongFlag
+ clientConnectWithDB
+ clientNoSchema
+ clientCompress
+ clientODBC
+ clientLocalFiles
+ clientIgnoreSpace
+ clientProtocol41
+ clientInteractive
+ clientSSL
+ clientIgnoreSIGPIPE
+ clientTransactions
+ clientReserved
+ clientSecureConn
+ clientMultiStatements
+ clientMultiResults
+ clientPSMultiResults
+ clientPluginAuth
+ clientConnectAttrs
+ clientPluginAuthLenEncClientData
+ clientCanHandleExpiredPasswords
+ clientSessionTrack
+ clientDeprecateEOF
+)
+
+const (
+ comQuit byte = iota + 1
+ comInitDB
+ comQuery
+ comFieldList
+ comCreateDB
+ comDropDB
+ comRefresh
+ comShutdown
+ comStatistics
+ comProcessInfo
+ comConnect
+ comProcessKill
+ comDebug
+ comPing
+ comTime
+ comDelayedInsert
+ comChangeUser
+ comBinlogDump
+ comTableDump
+ comConnectOut
+ comRegisterSlave
+ comStmtPrepare
+ comStmtExecute
+ comStmtSendLongData
+ comStmtClose
+ comStmtReset
+ comSetOption
+ comStmtFetch
+)
+
+// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType
+type fieldType byte
+
+const (
+ fieldTypeDecimal fieldType = iota
+ fieldTypeTiny
+ fieldTypeShort
+ fieldTypeLong
+ fieldTypeFloat
+ fieldTypeDouble
+ fieldTypeNULL
+ fieldTypeTimestamp
+ fieldTypeLongLong
+ fieldTypeInt24
+ fieldTypeDate
+ fieldTypeTime
+ fieldTypeDateTime
+ fieldTypeYear
+ fieldTypeNewDate
+ fieldTypeVarChar
+ fieldTypeBit
+)
+const (
+ fieldTypeJSON fieldType = iota + 0xf5
+ fieldTypeNewDecimal
+ fieldTypeEnum
+ fieldTypeSet
+ fieldTypeTinyBLOB
+ fieldTypeMediumBLOB
+ fieldTypeLongBLOB
+ fieldTypeBLOB
+ fieldTypeVarString
+ fieldTypeString
+ fieldTypeGeometry
+)
+
+type fieldFlag uint16
+
+const (
+ flagNotNULL fieldFlag = 1 << iota
+ flagPriKey
+ flagUniqueKey
+ flagMultipleKey
+ flagBLOB
+ flagUnsigned
+ flagZeroFill
+ flagBinary
+ flagEnum
+ flagAutoIncrement
+ flagTimestamp
+ flagSet
+ flagUnknown1
+ flagUnknown2
+ flagUnknown3
+ flagUnknown4
+)
+
+// http://dev.mysql.com/doc/internals/en/status-flags.html
+type statusFlag uint16
+
+const (
+ statusInTrans statusFlag = 1 << iota
+ statusInAutocommit
+ statusReserved // Not in documentation
+ statusMoreResultsExists
+ statusNoGoodIndexUsed
+ statusNoIndexUsed
+ statusCursorExists
+ statusLastRowSent
+ statusDbDropped
+ statusNoBackslashEscapes
+ statusMetadataChanged
+ statusQueryWasSlow
+ statusPsOutParams
+ statusInTransReadonly
+ statusSessionStateChanged
+)
+
+const (
+ cachingSha2PasswordRequestPublicKey = 2
+ cachingSha2PasswordFastAuthSuccess = 3
+ cachingSha2PasswordPerformFullAuthentication = 4
+)
diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go
new file mode 100644
index 0000000..e9ede2c
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/driver.go
@@ -0,0 +1,172 @@
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// Package mysql provides a MySQL driver for Go's database/sql package.
+//
+// The driver should be used via the database/sql package:
+//
+// import "database/sql"
+// import _ "github.com/go-sql-driver/mysql"
+//
+// db, err := sql.Open("mysql", "user:password@/dbname")
+//
+// See https://github.com/go-sql-driver/mysql#usage for details
+package mysql
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "net"
+ "sync"
+)
+
+// watcher interface is used for context support (From Go 1.8)
+type watcher interface {
+ startWatcher()
+}
+
+// MySQLDriver is exported to make the driver directly accessible.
+// In general the driver is used via the database/sql package.
+type MySQLDriver struct{}
+
+// DialFunc is a function which can be used to establish the network connection.
+// Custom dial functions must be registered with RegisterDial
+type DialFunc func(addr string) (net.Conn, error)
+
+var (
+ dialsLock sync.RWMutex
+ dials map[string]DialFunc
+)
+
+// RegisterDial registers a custom dial function. It can then be used by the
+// network address mynet(addr), where mynet is the registered new network.
+// addr is passed as a parameter to the dial function.
+func RegisterDial(net string, dial DialFunc) {
+ dialsLock.Lock()
+ defer dialsLock.Unlock()
+ if dials == nil {
+ dials = make(map[string]DialFunc)
+ }
+ dials[net] = dial
+}
+
+// Open new Connection.
+// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how
+// the DSN string is formatted
+func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
+ var err error
+
+ // New mysqlConn
+ mc := &mysqlConn{
+ maxAllowedPacket: maxPacketSize,
+ maxWriteSize: maxPacketSize - 1,
+ closech: make(chan struct{}),
+ }
+ mc.cfg, err = ParseDSN(dsn)
+ if err != nil {
+ return nil, err
+ }
+ mc.parseTime = mc.cfg.ParseTime
+
+ // Connect to Server
+ dialsLock.RLock()
+ dial, ok := dials[mc.cfg.Net]
+ dialsLock.RUnlock()
+ if ok {
+ mc.netConn, err = dial(mc.cfg.Addr)
+ } else {
+ nd := net.Dialer{Timeout: mc.cfg.Timeout}
+ mc.netConn, err = nd.Dial(mc.cfg.Net, mc.cfg.Addr)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // Enable TCP Keepalives on TCP connections
+ if tc, ok := mc.netConn.(*net.TCPConn); ok {
+ if err := tc.SetKeepAlive(true); err != nil {
+ // Don't send COM_QUIT before handshake.
+ mc.netConn.Close()
+ mc.netConn = nil
+ return nil, err
+ }
+ }
+
+ // Call startWatcher for context support (From Go 1.8)
+ if s, ok := interface{}(mc).(watcher); ok {
+ s.startWatcher()
+ }
+
+ mc.buf = newBuffer(mc.netConn)
+
+ // Set I/O timeouts
+ mc.buf.timeout = mc.cfg.ReadTimeout
+ mc.writeTimeout = mc.cfg.WriteTimeout
+
+ // Reading Handshake Initialization Packet
+ authData, plugin, err := mc.readHandshakePacket()
+ if err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+ if plugin == "" {
+ plugin = defaultAuthPlugin
+ }
+
+ // Send Client Authentication Packet
+ authResp, err := mc.auth(authData, plugin)
+ if err != nil {
+ // try the default auth plugin, if using the requested plugin failed
+ errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
+ plugin = defaultAuthPlugin
+ authResp, err = mc.auth(authData, plugin)
+ if err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+ }
+ if err = mc.writeHandshakeResponsePacket(authResp, plugin); err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+
+ // Handle response to auth packet, switch methods if possible
+ if err = mc.handleAuthResult(authData, plugin); err != nil {
+ // Authentication failed and MySQL has already closed the connection
+ // (https://dev.mysql.com/doc/internals/en/authentication-fails.html).
+ // Do not send COM_QUIT, just cleanup and return the error.
+ mc.cleanup()
+ return nil, err
+ }
+
+ if mc.cfg.MaxAllowedPacket > 0 {
+ mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket
+ } else {
+ // Get max allowed packet size
+ maxap, err := mc.getSystemVar("max_allowed_packet")
+ if err != nil {
+ mc.Close()
+ return nil, err
+ }
+ mc.maxAllowedPacket = stringToInt(maxap) - 1
+ }
+ if mc.maxAllowedPacket < maxPacketSize {
+ mc.maxWriteSize = mc.maxAllowedPacket
+ }
+
+ // Handle DSN Params
+ err = mc.handleParams()
+ if err != nil {
+ mc.Close()
+ return nil, err
+ }
+
+ return mc, nil
+}
+
+func init() {
+ sql.Register("mysql", &MySQLDriver{})
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go
new file mode 100644
index 0000000..be014ba
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/dsn.go
@@ -0,0 +1,611 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "bytes"
+ "crypto/rsa"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?")
+ errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)")
+ errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name")
+ errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations")
+)
+
+// Config is a configuration parsed from a DSN string.
+// If a new Config is created instead of being parsed from a DSN string,
+// the NewConfig function should be used, which sets default values.
+type Config struct {
+ User string // Username
+ Passwd string // Password (requires User)
+ Net string // Network type
+ Addr string // Network address (requires Net)
+ DBName string // Database name
+ Params map[string]string // Connection parameters
+ Collation string // Connection collation
+ Loc *time.Location // Location for time.Time values
+ MaxAllowedPacket int // Max packet size allowed
+ ServerPubKey string // Server public key name
+ pubKey *rsa.PublicKey // Server public key
+ TLSConfig string // TLS configuration name
+ tls *tls.Config // TLS configuration
+ Timeout time.Duration // Dial timeout
+ ReadTimeout time.Duration // I/O read timeout
+ WriteTimeout time.Duration // I/O write timeout
+
+ AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE
+ AllowCleartextPasswords bool // Allows the cleartext client side plugin
+ AllowNativePasswords bool // Allows the native password authentication method
+ AllowOldPasswords bool // Allows the old insecure password method
+ ClientFoundRows bool // Return number of matching rows instead of rows changed
+ ColumnsWithAlias bool // Prepend table alias to column names
+ InterpolateParams bool // Interpolate placeholders into query string
+ MultiStatements bool // Allow multiple statements in one query
+ ParseTime bool // Parse time values to time.Time
+ RejectReadOnly bool // Reject read-only connections
+}
+
+// NewConfig creates a new Config and sets default values.
+func NewConfig() *Config {
+ return &Config{
+ Collation: defaultCollation,
+ Loc: time.UTC,
+ MaxAllowedPacket: defaultMaxAllowedPacket,
+ AllowNativePasswords: true,
+ }
+}
+
+func (cfg *Config) normalize() error {
+ if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
+ return errInvalidDSNUnsafeCollation
+ }
+
+ // Set default network if empty
+ if cfg.Net == "" {
+ cfg.Net = "tcp"
+ }
+
+ // Set default address if empty
+ if cfg.Addr == "" {
+ switch cfg.Net {
+ case "tcp":
+ cfg.Addr = "127.0.0.1:3306"
+ case "unix":
+ cfg.Addr = "/tmp/mysql.sock"
+ default:
+ return errors.New("default addr for network '" + cfg.Net + "' unknown")
+ }
+
+ } else if cfg.Net == "tcp" {
+ cfg.Addr = ensureHavePort(cfg.Addr)
+ }
+
+ if cfg.tls != nil {
+ if cfg.tls.ServerName == "" && !cfg.tls.InsecureSkipVerify {
+ host, _, err := net.SplitHostPort(cfg.Addr)
+ if err == nil {
+ cfg.tls.ServerName = host
+ }
+ }
+ }
+
+ return nil
+}
+
+// FormatDSN formats the given Config into a DSN string which can be passed to
+// the driver.
+func (cfg *Config) FormatDSN() string {
+ var buf bytes.Buffer
+
+ // [username[:password]@]
+ if len(cfg.User) > 0 {
+ buf.WriteString(cfg.User)
+ if len(cfg.Passwd) > 0 {
+ buf.WriteByte(':')
+ buf.WriteString(cfg.Passwd)
+ }
+ buf.WriteByte('@')
+ }
+
+ // [protocol[(address)]]
+ if len(cfg.Net) > 0 {
+ buf.WriteString(cfg.Net)
+ if len(cfg.Addr) > 0 {
+ buf.WriteByte('(')
+ buf.WriteString(cfg.Addr)
+ buf.WriteByte(')')
+ }
+ }
+
+ // /dbname
+ buf.WriteByte('/')
+ buf.WriteString(cfg.DBName)
+
+ // [?param1=value1&...¶mN=valueN]
+ hasParam := false
+
+ if cfg.AllowAllFiles {
+ hasParam = true
+ buf.WriteString("?allowAllFiles=true")
+ }
+
+ if cfg.AllowCleartextPasswords {
+ if hasParam {
+ buf.WriteString("&allowCleartextPasswords=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?allowCleartextPasswords=true")
+ }
+ }
+
+ if !cfg.AllowNativePasswords {
+ if hasParam {
+ buf.WriteString("&allowNativePasswords=false")
+ } else {
+ hasParam = true
+ buf.WriteString("?allowNativePasswords=false")
+ }
+ }
+
+ if cfg.AllowOldPasswords {
+ if hasParam {
+ buf.WriteString("&allowOldPasswords=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?allowOldPasswords=true")
+ }
+ }
+
+ if cfg.ClientFoundRows {
+ if hasParam {
+ buf.WriteString("&clientFoundRows=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?clientFoundRows=true")
+ }
+ }
+
+ if col := cfg.Collation; col != defaultCollation && len(col) > 0 {
+ if hasParam {
+ buf.WriteString("&collation=")
+ } else {
+ hasParam = true
+ buf.WriteString("?collation=")
+ }
+ buf.WriteString(col)
+ }
+
+ if cfg.ColumnsWithAlias {
+ if hasParam {
+ buf.WriteString("&columnsWithAlias=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?columnsWithAlias=true")
+ }
+ }
+
+ if cfg.InterpolateParams {
+ if hasParam {
+ buf.WriteString("&interpolateParams=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?interpolateParams=true")
+ }
+ }
+
+ if cfg.Loc != time.UTC && cfg.Loc != nil {
+ if hasParam {
+ buf.WriteString("&loc=")
+ } else {
+ hasParam = true
+ buf.WriteString("?loc=")
+ }
+ buf.WriteString(url.QueryEscape(cfg.Loc.String()))
+ }
+
+ if cfg.MultiStatements {
+ if hasParam {
+ buf.WriteString("&multiStatements=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?multiStatements=true")
+ }
+ }
+
+ if cfg.ParseTime {
+ if hasParam {
+ buf.WriteString("&parseTime=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?parseTime=true")
+ }
+ }
+
+ if cfg.ReadTimeout > 0 {
+ if hasParam {
+ buf.WriteString("&readTimeout=")
+ } else {
+ hasParam = true
+ buf.WriteString("?readTimeout=")
+ }
+ buf.WriteString(cfg.ReadTimeout.String())
+ }
+
+ if cfg.RejectReadOnly {
+ if hasParam {
+ buf.WriteString("&rejectReadOnly=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?rejectReadOnly=true")
+ }
+ }
+
+ if len(cfg.ServerPubKey) > 0 {
+ if hasParam {
+ buf.WriteString("&serverPubKey=")
+ } else {
+ hasParam = true
+ buf.WriteString("?serverPubKey=")
+ }
+ buf.WriteString(url.QueryEscape(cfg.ServerPubKey))
+ }
+
+ if cfg.Timeout > 0 {
+ if hasParam {
+ buf.WriteString("&timeout=")
+ } else {
+ hasParam = true
+ buf.WriteString("?timeout=")
+ }
+ buf.WriteString(cfg.Timeout.String())
+ }
+
+ if len(cfg.TLSConfig) > 0 {
+ if hasParam {
+ buf.WriteString("&tls=")
+ } else {
+ hasParam = true
+ buf.WriteString("?tls=")
+ }
+ buf.WriteString(url.QueryEscape(cfg.TLSConfig))
+ }
+
+ if cfg.WriteTimeout > 0 {
+ if hasParam {
+ buf.WriteString("&writeTimeout=")
+ } else {
+ hasParam = true
+ buf.WriteString("?writeTimeout=")
+ }
+ buf.WriteString(cfg.WriteTimeout.String())
+ }
+
+ if cfg.MaxAllowedPacket != defaultMaxAllowedPacket {
+ if hasParam {
+ buf.WriteString("&maxAllowedPacket=")
+ } else {
+ hasParam = true
+ buf.WriteString("?maxAllowedPacket=")
+ }
+ buf.WriteString(strconv.Itoa(cfg.MaxAllowedPacket))
+
+ }
+
+ // other params
+ if cfg.Params != nil {
+ var params []string
+ for param := range cfg.Params {
+ params = append(params, param)
+ }
+ sort.Strings(params)
+ for _, param := range params {
+ if hasParam {
+ buf.WriteByte('&')
+ } else {
+ hasParam = true
+ buf.WriteByte('?')
+ }
+
+ buf.WriteString(param)
+ buf.WriteByte('=')
+ buf.WriteString(url.QueryEscape(cfg.Params[param]))
+ }
+ }
+
+ return buf.String()
+}
+
+// ParseDSN parses the DSN string to a Config
+func ParseDSN(dsn string) (cfg *Config, err error) {
+ // New config with some default values
+ cfg = NewConfig()
+
+ // [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN]
+ // Find the last '/' (since the password or the net addr might contain a '/')
+ foundSlash := false
+ for i := len(dsn) - 1; i >= 0; i-- {
+ if dsn[i] == '/' {
+ foundSlash = true
+ var j, k int
+
+ // left part is empty if i <= 0
+ if i > 0 {
+ // [username[:password]@][protocol[(address)]]
+ // Find the last '@' in dsn[:i]
+ for j = i; j >= 0; j-- {
+ if dsn[j] == '@' {
+ // username[:password]
+ // Find the first ':' in dsn[:j]
+ for k = 0; k < j; k++ {
+ if dsn[k] == ':' {
+ cfg.Passwd = dsn[k+1 : j]
+ break
+ }
+ }
+ cfg.User = dsn[:k]
+
+ break
+ }
+ }
+
+ // [protocol[(address)]]
+ // Find the first '(' in dsn[j+1:i]
+ for k = j + 1; k < i; k++ {
+ if dsn[k] == '(' {
+ // dsn[i-1] must be == ')' if an address is specified
+ if dsn[i-1] != ')' {
+ if strings.ContainsRune(dsn[k+1:i], ')') {
+ return nil, errInvalidDSNUnescaped
+ }
+ return nil, errInvalidDSNAddr
+ }
+ cfg.Addr = dsn[k+1 : i-1]
+ break
+ }
+ }
+ cfg.Net = dsn[j+1 : k]
+ }
+
+ // dbname[?param1=value1&...¶mN=valueN]
+ // Find the first '?' in dsn[i+1:]
+ for j = i + 1; j < len(dsn); j++ {
+ if dsn[j] == '?' {
+ if err = parseDSNParams(cfg, dsn[j+1:]); err != nil {
+ return
+ }
+ break
+ }
+ }
+ cfg.DBName = dsn[i+1 : j]
+
+ break
+ }
+ }
+
+ if !foundSlash && len(dsn) > 0 {
+ return nil, errInvalidDSNNoSlash
+ }
+
+ if err = cfg.normalize(); err != nil {
+ return nil, err
+ }
+ return
+}
+
+// parseDSNParams parses the DSN "query string"
+// Values must be url.QueryEscape'ed
+func parseDSNParams(cfg *Config, params string) (err error) {
+ for _, v := range strings.Split(params, "&") {
+ param := strings.SplitN(v, "=", 2)
+ if len(param) != 2 {
+ continue
+ }
+
+ // cfg params
+ switch value := param[1]; param[0] {
+ // Disable INFILE whitelist / enable all files
+ case "allowAllFiles":
+ var isBool bool
+ cfg.AllowAllFiles, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use cleartext authentication mode (MySQL 5.5.10+)
+ case "allowCleartextPasswords":
+ var isBool bool
+ cfg.AllowCleartextPasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use native password authentication
+ case "allowNativePasswords":
+ var isBool bool
+ cfg.AllowNativePasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use old authentication mode (pre MySQL 4.1)
+ case "allowOldPasswords":
+ var isBool bool
+ cfg.AllowOldPasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Switch "rowsAffected" mode
+ case "clientFoundRows":
+ var isBool bool
+ cfg.ClientFoundRows, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Collation
+ case "collation":
+ cfg.Collation = value
+ break
+
+ case "columnsWithAlias":
+ var isBool bool
+ cfg.ColumnsWithAlias, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Compression
+ case "compress":
+ return errors.New("compression not implemented yet")
+
+ // Enable client side placeholder substitution
+ case "interpolateParams":
+ var isBool bool
+ cfg.InterpolateParams, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Time Location
+ case "loc":
+ if value, err = url.QueryUnescape(value); err != nil {
+ return
+ }
+ cfg.Loc, err = time.LoadLocation(value)
+ if err != nil {
+ return
+ }
+
+ // multiple statements in one query
+ case "multiStatements":
+ var isBool bool
+ cfg.MultiStatements, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // time.Time parsing
+ case "parseTime":
+ var isBool bool
+ cfg.ParseTime, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // I/O read Timeout
+ case "readTimeout":
+ cfg.ReadTimeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+
+ // Reject read-only connections
+ case "rejectReadOnly":
+ var isBool bool
+ cfg.RejectReadOnly, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Server public key
+ case "serverPubKey":
+ name, err := url.QueryUnescape(value)
+ if err != nil {
+ return fmt.Errorf("invalid value for server pub key name: %v", err)
+ }
+
+ if pubKey := getServerPubKey(name); pubKey != nil {
+ cfg.ServerPubKey = name
+ cfg.pubKey = pubKey
+ } else {
+ return errors.New("invalid value / unknown server pub key name: " + name)
+ }
+
+ // Strict mode
+ case "strict":
+ panic("strict mode has been removed. See https://github.com/go-sql-driver/mysql/wiki/strict-mode")
+
+ // Dial Timeout
+ case "timeout":
+ cfg.Timeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+
+ // TLS-Encryption
+ case "tls":
+ boolValue, isBool := readBool(value)
+ if isBool {
+ if boolValue {
+ cfg.TLSConfig = "true"
+ cfg.tls = &tls.Config{}
+ } else {
+ cfg.TLSConfig = "false"
+ }
+ } else if vl := strings.ToLower(value); vl == "skip-verify" {
+ cfg.TLSConfig = vl
+ cfg.tls = &tls.Config{InsecureSkipVerify: true}
+ } else {
+ name, err := url.QueryUnescape(value)
+ if err != nil {
+ return fmt.Errorf("invalid value for TLS config name: %v", err)
+ }
+
+ if tlsConfig := getTLSConfigClone(name); tlsConfig != nil {
+ cfg.TLSConfig = name
+ cfg.tls = tlsConfig
+ } else {
+ return errors.New("invalid value / unknown config name: " + name)
+ }
+ }
+
+ // I/O write Timeout
+ case "writeTimeout":
+ cfg.WriteTimeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+ case "maxAllowedPacket":
+ cfg.MaxAllowedPacket, err = strconv.Atoi(value)
+ if err != nil {
+ return
+ }
+ default:
+ // lazy init
+ if cfg.Params == nil {
+ cfg.Params = make(map[string]string)
+ }
+
+ if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil {
+ return
+ }
+ }
+ }
+
+ return
+}
+
+func ensureHavePort(addr string) string {
+ if _, _, err := net.SplitHostPort(addr); err != nil {
+ return net.JoinHostPort(addr, "3306")
+ }
+ return addr
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go
new file mode 100644
index 0000000..760782f
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/errors.go
@@ -0,0 +1,65 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "os"
+)
+
+// Various errors the driver might return. Can change between driver versions.
+var (
+ ErrInvalidConn = errors.New("invalid connection")
+ ErrMalformPkt = errors.New("malformed packet")
+ ErrNoTLS = errors.New("TLS requested but server does not support TLS")
+ ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN")
+ ErrNativePassword = errors.New("this user requires mysql native password authentication.")
+ ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
+ ErrUnknownPlugin = errors.New("this authentication plugin is not supported")
+ ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+")
+ ErrPktSync = errors.New("commands out of sync. You can't run this command now")
+ ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?")
+ ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server")
+ ErrBusyBuffer = errors.New("busy buffer")
+
+ // errBadConnNoWrite is used for connection errors where nothing was sent to the database yet.
+ // If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn
+ // to trigger a resend.
+ // See https://github.com/go-sql-driver/mysql/pull/302
+ errBadConnNoWrite = errors.New("bad connection")
+)
+
+var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
+
+// Logger is used to log critical error messages.
+type Logger interface {
+ Print(v ...interface{})
+}
+
+// SetLogger is used to set the logger for critical errors.
+// The initial logger is os.Stderr.
+func SetLogger(logger Logger) error {
+ if logger == nil {
+ return errors.New("logger is nil")
+ }
+ errLog = logger
+ return nil
+}
+
+// MySQLError is an error type which represents a single MySQL error
+type MySQLError struct {
+ Number uint16
+ Message string
+}
+
+func (me *MySQLError) Error() string {
+ return fmt.Sprintf("Error %d: %s", me.Number, me.Message)
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go
new file mode 100644
index 0000000..e1e2ece
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/fields.go
@@ -0,0 +1,194 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql"
+ "reflect"
+)
+
+func (mf *mysqlField) typeDatabaseName() string {
+ switch mf.fieldType {
+ case fieldTypeBit:
+ return "BIT"
+ case fieldTypeBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "TEXT"
+ }
+ return "BLOB"
+ case fieldTypeDate:
+ return "DATE"
+ case fieldTypeDateTime:
+ return "DATETIME"
+ case fieldTypeDecimal:
+ return "DECIMAL"
+ case fieldTypeDouble:
+ return "DOUBLE"
+ case fieldTypeEnum:
+ return "ENUM"
+ case fieldTypeFloat:
+ return "FLOAT"
+ case fieldTypeGeometry:
+ return "GEOMETRY"
+ case fieldTypeInt24:
+ return "MEDIUMINT"
+ case fieldTypeJSON:
+ return "JSON"
+ case fieldTypeLong:
+ return "INT"
+ case fieldTypeLongBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "LONGTEXT"
+ }
+ return "LONGBLOB"
+ case fieldTypeLongLong:
+ return "BIGINT"
+ case fieldTypeMediumBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "MEDIUMTEXT"
+ }
+ return "MEDIUMBLOB"
+ case fieldTypeNewDate:
+ return "DATE"
+ case fieldTypeNewDecimal:
+ return "DECIMAL"
+ case fieldTypeNULL:
+ return "NULL"
+ case fieldTypeSet:
+ return "SET"
+ case fieldTypeShort:
+ return "SMALLINT"
+ case fieldTypeString:
+ if mf.charSet == collations[binaryCollation] {
+ return "BINARY"
+ }
+ return "CHAR"
+ case fieldTypeTime:
+ return "TIME"
+ case fieldTypeTimestamp:
+ return "TIMESTAMP"
+ case fieldTypeTiny:
+ return "TINYINT"
+ case fieldTypeTinyBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "TINYTEXT"
+ }
+ return "TINYBLOB"
+ case fieldTypeVarChar:
+ if mf.charSet == collations[binaryCollation] {
+ return "VARBINARY"
+ }
+ return "VARCHAR"
+ case fieldTypeVarString:
+ if mf.charSet == collations[binaryCollation] {
+ return "VARBINARY"
+ }
+ return "VARCHAR"
+ case fieldTypeYear:
+ return "YEAR"
+ default:
+ return ""
+ }
+}
+
+var (
+ scanTypeFloat32 = reflect.TypeOf(float32(0))
+ scanTypeFloat64 = reflect.TypeOf(float64(0))
+ scanTypeInt8 = reflect.TypeOf(int8(0))
+ scanTypeInt16 = reflect.TypeOf(int16(0))
+ scanTypeInt32 = reflect.TypeOf(int32(0))
+ scanTypeInt64 = reflect.TypeOf(int64(0))
+ scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
+ scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
+ scanTypeNullTime = reflect.TypeOf(NullTime{})
+ scanTypeUint8 = reflect.TypeOf(uint8(0))
+ scanTypeUint16 = reflect.TypeOf(uint16(0))
+ scanTypeUint32 = reflect.TypeOf(uint32(0))
+ scanTypeUint64 = reflect.TypeOf(uint64(0))
+ scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{})
+ scanTypeUnknown = reflect.TypeOf(new(interface{}))
+)
+
+type mysqlField struct {
+ tableName string
+ name string
+ length uint32
+ flags fieldFlag
+ fieldType fieldType
+ decimals byte
+ charSet uint8
+}
+
+func (mf *mysqlField) scanType() reflect.Type {
+ switch mf.fieldType {
+ case fieldTypeTiny:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint8
+ }
+ return scanTypeInt8
+ }
+ return scanTypeNullInt
+
+ case fieldTypeShort, fieldTypeYear:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint16
+ }
+ return scanTypeInt16
+ }
+ return scanTypeNullInt
+
+ case fieldTypeInt24, fieldTypeLong:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint32
+ }
+ return scanTypeInt32
+ }
+ return scanTypeNullInt
+
+ case fieldTypeLongLong:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint64
+ }
+ return scanTypeInt64
+ }
+ return scanTypeNullInt
+
+ case fieldTypeFloat:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeFloat32
+ }
+ return scanTypeNullFloat
+
+ case fieldTypeDouble:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeFloat64
+ }
+ return scanTypeNullFloat
+
+ case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
+ fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
+ fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
+ fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
+ fieldTypeTime:
+ return scanTypeRawBytes
+
+ case fieldTypeDate, fieldTypeNewDate,
+ fieldTypeTimestamp, fieldTypeDateTime:
+ // NullTime is always returned for more consistent behavior as it can
+ // handle both cases of parseTime regardless if the field is nullable.
+ return scanTypeNullTime
+
+ default:
+ return scanTypeUnknown
+ }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go
new file mode 100644
index 0000000..273cb0b
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/infile.go
@@ -0,0 +1,182 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "sync"
+)
+
+var (
+ fileRegister map[string]bool
+ fileRegisterLock sync.RWMutex
+ readerRegister map[string]func() io.Reader
+ readerRegisterLock sync.RWMutex
+)
+
+// RegisterLocalFile adds the given file to the file whitelist,
+// so that it can be used by "LOAD DATA LOCAL INFILE ".
+// Alternatively you can allow the use of all local files with
+// the DSN parameter 'allowAllFiles=true'
+//
+// filePath := "/home/gopher/data.csv"
+// mysql.RegisterLocalFile(filePath)
+// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo")
+// if err != nil {
+// ...
+//
+func RegisterLocalFile(filePath string) {
+ fileRegisterLock.Lock()
+ // lazy map init
+ if fileRegister == nil {
+ fileRegister = make(map[string]bool)
+ }
+
+ fileRegister[strings.Trim(filePath, `"`)] = true
+ fileRegisterLock.Unlock()
+}
+
+// DeregisterLocalFile removes the given filepath from the whitelist.
+func DeregisterLocalFile(filePath string) {
+ fileRegisterLock.Lock()
+ delete(fileRegister, strings.Trim(filePath, `"`))
+ fileRegisterLock.Unlock()
+}
+
+// RegisterReaderHandler registers a handler function which is used
+// to receive a io.Reader.
+// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::".
+// If the handler returns a io.ReadCloser Close() is called when the
+// request is finished.
+//
+// mysql.RegisterReaderHandler("data", func() io.Reader {
+// var csvReader io.Reader // Some Reader that returns CSV data
+// ... // Open Reader here
+// return csvReader
+// })
+// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo")
+// if err != nil {
+// ...
+//
+func RegisterReaderHandler(name string, handler func() io.Reader) {
+ readerRegisterLock.Lock()
+ // lazy map init
+ if readerRegister == nil {
+ readerRegister = make(map[string]func() io.Reader)
+ }
+
+ readerRegister[name] = handler
+ readerRegisterLock.Unlock()
+}
+
+// DeregisterReaderHandler removes the ReaderHandler function with
+// the given name from the registry.
+func DeregisterReaderHandler(name string) {
+ readerRegisterLock.Lock()
+ delete(readerRegister, name)
+ readerRegisterLock.Unlock()
+}
+
+func deferredClose(err *error, closer io.Closer) {
+ closeErr := closer.Close()
+ if *err == nil {
+ *err = closeErr
+ }
+}
+
+func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
+ var rdr io.Reader
+ var data []byte
+ packetSize := 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP
+ if mc.maxWriteSize < packetSize {
+ packetSize = mc.maxWriteSize
+ }
+
+ if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader
+ // The server might return an an absolute path. See issue #355.
+ name = name[idx+8:]
+
+ readerRegisterLock.RLock()
+ handler, inMap := readerRegister[name]
+ readerRegisterLock.RUnlock()
+
+ if inMap {
+ rdr = handler()
+ if rdr != nil {
+ if cl, ok := rdr.(io.Closer); ok {
+ defer deferredClose(&err, cl)
+ }
+ } else {
+ err = fmt.Errorf("Reader '%s' is ", name)
+ }
+ } else {
+ err = fmt.Errorf("Reader '%s' is not registered", name)
+ }
+ } else { // File
+ name = strings.Trim(name, `"`)
+ fileRegisterLock.RLock()
+ fr := fileRegister[name]
+ fileRegisterLock.RUnlock()
+ if mc.cfg.AllowAllFiles || fr {
+ var file *os.File
+ var fi os.FileInfo
+
+ if file, err = os.Open(name); err == nil {
+ defer deferredClose(&err, file)
+
+ // get file size
+ if fi, err = file.Stat(); err == nil {
+ rdr = file
+ if fileSize := int(fi.Size()); fileSize < packetSize {
+ packetSize = fileSize
+ }
+ }
+ }
+ } else {
+ err = fmt.Errorf("local file '%s' is not registered", name)
+ }
+ }
+
+ // send content packets
+ // if packetSize == 0, the Reader contains no data
+ if err == nil && packetSize > 0 {
+ data := make([]byte, 4+packetSize)
+ var n int
+ for err == nil {
+ n, err = rdr.Read(data[4:])
+ if n > 0 {
+ if ioErr := mc.writePacket(data[:4+n]); ioErr != nil {
+ return ioErr
+ }
+ }
+ }
+ if err == io.EOF {
+ err = nil
+ }
+ }
+
+ // send empty packet (termination)
+ if data == nil {
+ data = make([]byte, 4)
+ }
+ if ioErr := mc.writePacket(data[:4]); ioErr != nil {
+ return ioErr
+ }
+
+ // read OK packet
+ if err == nil {
+ return mc.readResultOK()
+ }
+
+ mc.readPacket()
+ return err
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go
new file mode 100644
index 0000000..9ed6408
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/packets.go
@@ -0,0 +1,1286 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "bytes"
+ "crypto/tls"
+ "database/sql/driver"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "time"
+)
+
+// Packets documentation:
+// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
+
+// Read packet to buffer 'data'
+func (mc *mysqlConn) readPacket() ([]byte, error) {
+ var prevData []byte
+ for {
+ // read packet header
+ data, err := mc.buf.readNext(4)
+ if err != nil {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return nil, cerr
+ }
+ errLog.Print(err)
+ mc.Close()
+ return nil, ErrInvalidConn
+ }
+
+ // packet length [24 bit]
+ pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16)
+
+ // check packet sync [8 bit]
+ if data[3] != mc.sequence {
+ if data[3] > mc.sequence {
+ return nil, ErrPktSyncMul
+ }
+ return nil, ErrPktSync
+ }
+ mc.sequence++
+
+ // packets with length 0 terminate a previous packet which is a
+ // multiple of (2^24)−1 bytes long
+ if pktLen == 0 {
+ // there was no previous packet
+ if prevData == nil {
+ errLog.Print(ErrMalformPkt)
+ mc.Close()
+ return nil, ErrInvalidConn
+ }
+
+ return prevData, nil
+ }
+
+ // read packet body [pktLen bytes]
+ data, err = mc.buf.readNext(pktLen)
+ if err != nil {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return nil, cerr
+ }
+ errLog.Print(err)
+ mc.Close()
+ return nil, ErrInvalidConn
+ }
+
+ // return data if this was the last packet
+ if pktLen < maxPacketSize {
+ // zero allocations for non-split packets
+ if prevData == nil {
+ return data, nil
+ }
+
+ return append(prevData, data...), nil
+ }
+
+ prevData = append(prevData, data...)
+ }
+}
+
+// Write packet buffer 'data'
+func (mc *mysqlConn) writePacket(data []byte) error {
+ pktLen := len(data) - 4
+
+ if pktLen > mc.maxAllowedPacket {
+ return ErrPktTooLarge
+ }
+
+ for {
+ var size int
+ if pktLen >= maxPacketSize {
+ data[0] = 0xff
+ data[1] = 0xff
+ data[2] = 0xff
+ size = maxPacketSize
+ } else {
+ data[0] = byte(pktLen)
+ data[1] = byte(pktLen >> 8)
+ data[2] = byte(pktLen >> 16)
+ size = pktLen
+ }
+ data[3] = mc.sequence
+
+ // Write packet
+ if mc.writeTimeout > 0 {
+ if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil {
+ return err
+ }
+ }
+
+ n, err := mc.netConn.Write(data[:4+size])
+ if err == nil && n == 4+size {
+ mc.sequence++
+ if size != maxPacketSize {
+ return nil
+ }
+ pktLen -= size
+ data = data[size:]
+ continue
+ }
+
+ // Handle error
+ if err == nil { // n != len(data)
+ mc.cleanup()
+ errLog.Print(ErrMalformPkt)
+ } else {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return cerr
+ }
+ if n == 0 && pktLen == len(data)-4 {
+ // only for the first loop iteration when nothing was written yet
+ return errBadConnNoWrite
+ }
+ mc.cleanup()
+ errLog.Print(err)
+ }
+ return ErrInvalidConn
+ }
+}
+
+/******************************************************************************
+* Initialization Process *
+******************************************************************************/
+
+// Handshake Initialization Packet
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake
+func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err error) {
+ data, err = mc.readPacket()
+ if err != nil {
+ // for init we can rewrite this to ErrBadConn for sql.Driver to retry, since
+ // in connection initialization we don't risk retrying non-idempotent actions.
+ if err == ErrInvalidConn {
+ return nil, "", driver.ErrBadConn
+ }
+ return
+ }
+
+ if data[0] == iERR {
+ return nil, "", mc.handleErrorPacket(data)
+ }
+
+ // protocol version [1 byte]
+ if data[0] < minProtocolVersion {
+ return nil, "", fmt.Errorf(
+ "unsupported protocol version %d. Version %d or higher is required",
+ data[0],
+ minProtocolVersion,
+ )
+ }
+
+ // server version [null terminated string]
+ // connection id [4 bytes]
+ pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4
+
+ // first part of the password cipher [8 bytes]
+ authData := data[pos : pos+8]
+
+ // (filler) always 0x00 [1 byte]
+ pos += 8 + 1
+
+ // capability flags (lower 2 bytes) [2 bytes]
+ mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
+ if mc.flags&clientProtocol41 == 0 {
+ return nil, "", ErrOldProtocol
+ }
+ if mc.flags&clientSSL == 0 && mc.cfg.tls != nil {
+ return nil, "", ErrNoTLS
+ }
+ pos += 2
+
+ if len(data) > pos {
+ // character set [1 byte]
+ // status flags [2 bytes]
+ // capability flags (upper 2 bytes) [2 bytes]
+ // length of auth-plugin-data [1 byte]
+ // reserved (all [00]) [10 bytes]
+ pos += 1 + 2 + 2 + 1 + 10
+
+ // second part of the password cipher [mininum 13 bytes],
+ // where len=MAX(13, length of auth-plugin-data - 8)
+ //
+ // The web documentation is ambiguous about the length. However,
+ // according to mysql-5.7/sql/auth/sql_authentication.cc line 538,
+ // the 13th byte is "\0 byte, terminating the second part of
+ // a scramble". So the second part of the password cipher is
+ // a NULL terminated string that's at least 13 bytes with the
+ // last byte being NULL.
+ //
+ // The official Python library uses the fixed length 12
+ // which seems to work but technically could have a hidden bug.
+ authData = append(authData, data[pos:pos+12]...)
+ pos += 13
+
+ // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2)
+ // \NUL otherwise
+ if end := bytes.IndexByte(data[pos:], 0x00); end != -1 {
+ plugin = string(data[pos : pos+end])
+ } else {
+ plugin = string(data[pos:])
+ }
+
+ // make a memory safe copy of the cipher slice
+ var b [20]byte
+ copy(b[:], authData)
+ return b[:], plugin, nil
+ }
+
+ // make a memory safe copy of the cipher slice
+ var b [8]byte
+ copy(b[:], authData)
+ return b[:], plugin, nil
+}
+
+// Client Authentication Packet
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
+func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string) error {
+ // Adjust client flags based on server support
+ clientFlags := clientProtocol41 |
+ clientSecureConn |
+ clientLongPassword |
+ clientTransactions |
+ clientLocalFiles |
+ clientPluginAuth |
+ clientMultiResults |
+ mc.flags&clientLongFlag
+
+ if mc.cfg.ClientFoundRows {
+ clientFlags |= clientFoundRows
+ }
+
+ // To enable TLS / SSL
+ if mc.cfg.tls != nil {
+ clientFlags |= clientSSL
+ }
+
+ if mc.cfg.MultiStatements {
+ clientFlags |= clientMultiStatements
+ }
+
+ // encode length of the auth plugin data
+ var authRespLEIBuf [9]byte
+ authRespLen := len(authResp)
+ authRespLEI := appendLengthEncodedInteger(authRespLEIBuf[:0], uint64(authRespLen))
+ if len(authRespLEI) > 1 {
+ // if the length can not be written in 1 byte, it must be written as a
+ // length encoded integer
+ clientFlags |= clientPluginAuthLenEncClientData
+ }
+
+ pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + len(authRespLEI) + len(authResp) + 21 + 1
+
+ // To specify a db name
+ if n := len(mc.cfg.DBName); n > 0 {
+ clientFlags |= clientConnectWithDB
+ pktLen += n + 1
+ }
+
+ // Calculate packet length and get buffer with that size
+ data := mc.buf.takeSmallBuffer(pktLen + 4)
+ if data == nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return errBadConnNoWrite
+ }
+
+ // ClientFlags [32 bit]
+ data[4] = byte(clientFlags)
+ data[5] = byte(clientFlags >> 8)
+ data[6] = byte(clientFlags >> 16)
+ data[7] = byte(clientFlags >> 24)
+
+ // MaxPacketSize [32 bit] (none)
+ data[8] = 0x00
+ data[9] = 0x00
+ data[10] = 0x00
+ data[11] = 0x00
+
+ // Charset [1 byte]
+ var found bool
+ data[12], found = collations[mc.cfg.Collation]
+ if !found {
+ // Note possibility for false negatives:
+ // could be triggered although the collation is valid if the
+ // collations map does not contain entries the server supports.
+ return errors.New("unknown collation")
+ }
+
+ // SSL Connection Request Packet
+ // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest
+ if mc.cfg.tls != nil {
+ // Send TLS / SSL request packet
+ if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil {
+ return err
+ }
+
+ // Switch to TLS
+ tlsConn := tls.Client(mc.netConn, mc.cfg.tls)
+ if err := tlsConn.Handshake(); err != nil {
+ return err
+ }
+ mc.netConn = tlsConn
+ mc.buf.nc = tlsConn
+ }
+
+ // Filler [23 bytes] (all 0x00)
+ pos := 13
+ for ; pos < 13+23; pos++ {
+ data[pos] = 0
+ }
+
+ // User [null terminated string]
+ if len(mc.cfg.User) > 0 {
+ pos += copy(data[pos:], mc.cfg.User)
+ }
+ data[pos] = 0x00
+ pos++
+
+ // Auth Data [length encoded integer]
+ pos += copy(data[pos:], authRespLEI)
+ pos += copy(data[pos:], authResp)
+
+ // Databasename [null terminated string]
+ if len(mc.cfg.DBName) > 0 {
+ pos += copy(data[pos:], mc.cfg.DBName)
+ data[pos] = 0x00
+ pos++
+ }
+
+ pos += copy(data[pos:], plugin)
+ data[pos] = 0x00
+ pos++
+
+ // Send Auth packet
+ return mc.writePacket(data[:pos])
+}
+
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
+func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error {
+ pktLen := 4 + len(authData)
+ data := mc.buf.takeSmallBuffer(pktLen)
+ if data == nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return errBadConnNoWrite
+ }
+
+ // Add the auth data [EOF]
+ copy(data[4:], authData)
+ return mc.writePacket(data)
+}
+
+/******************************************************************************
+* Command Packets *
+******************************************************************************/
+
+func (mc *mysqlConn) writeCommandPacket(command byte) error {
+ // Reset Packet Sequence
+ mc.sequence = 0
+
+ data := mc.buf.takeSmallBuffer(4 + 1)
+ if data == nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return errBadConnNoWrite
+ }
+
+ // Add command byte
+ data[4] = command
+
+ // Send CMD packet
+ return mc.writePacket(data)
+}
+
+func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
+ // Reset Packet Sequence
+ mc.sequence = 0
+
+ pktLen := 1 + len(arg)
+ data := mc.buf.takeBuffer(pktLen + 4)
+ if data == nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return errBadConnNoWrite
+ }
+
+ // Add command byte
+ data[4] = command
+
+ // Add arg
+ copy(data[5:], arg)
+
+ // Send CMD packet
+ return mc.writePacket(data)
+}
+
+func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
+ // Reset Packet Sequence
+ mc.sequence = 0
+
+ data := mc.buf.takeSmallBuffer(4 + 1 + 4)
+ if data == nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return errBadConnNoWrite
+ }
+
+ // Add command byte
+ data[4] = command
+
+ // Add arg [32 bit]
+ data[5] = byte(arg)
+ data[6] = byte(arg >> 8)
+ data[7] = byte(arg >> 16)
+ data[8] = byte(arg >> 24)
+
+ // Send CMD packet
+ return mc.writePacket(data)
+}
+
+/******************************************************************************
+* Result Packets *
+******************************************************************************/
+
+func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
+ data, err := mc.readPacket()
+ if err != nil {
+ return nil, "", err
+ }
+
+ // packet indicator
+ switch data[0] {
+
+ case iOK:
+ return nil, "", mc.handleOkPacket(data)
+
+ case iAuthMoreData:
+ return data[1:], "", err
+
+ case iEOF:
+ if len(data) == 1 {
+ // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
+ return nil, "mysql_old_password", nil
+ }
+ pluginEndIndex := bytes.IndexByte(data, 0x00)
+ if pluginEndIndex < 0 {
+ return nil, "", ErrMalformPkt
+ }
+ plugin := string(data[1:pluginEndIndex])
+ authData := data[pluginEndIndex+1:]
+ return authData, plugin, nil
+
+ default: // Error otherwise
+ return nil, "", mc.handleErrorPacket(data)
+ }
+}
+
+// Returns error if Packet is not an 'Result OK'-Packet
+func (mc *mysqlConn) readResultOK() error {
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ if data[0] == iOK {
+ return mc.handleOkPacket(data)
+ }
+ return mc.handleErrorPacket(data)
+}
+
+// Result Set Header Packet: returns the column count of the result set (0 for OK/ERR/LOCAL INFILE responses)
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset
+func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) {
+	data, err := mc.readPacket()
+	if err == nil {
+		switch data[0] {
+
+		case iOK: // no result set (e.g. DML statement)
+			return 0, mc.handleOkPacket(data)
+
+		case iERR:
+			return 0, mc.handleErrorPacket(data)
+
+		case iLocalInFile: // server requests a local file (LOAD DATA LOCAL INFILE)
+			return 0, mc.handleInFileRequest(string(data[1:]))
+		}
+
+		// column count [length encoded integer]
+		num, _, n := readLengthEncodedInteger(data)
+		if n-len(data) == 0 { // the integer must span the whole packet, else the packet is malformed
+			return int(num), nil
+		}
+
+		return 0, ErrMalformPkt
+	}
+	return 0, err
+}
+
+// Error Packet: decodes an ERR packet into a *MySQLError (or driver.ErrBadConn for read-only failover)
+// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet
+func (mc *mysqlConn) handleErrorPacket(data []byte) error {
+	if data[0] != iERR {
+		return ErrMalformPkt
+	}
+
+	// 0xff [1 byte] packet header
+
+	// Error Number [16 bit uint]
+	errno := binary.LittleEndian.Uint16(data[1:3])
+
+	// 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION
+	// 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover)
+	if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly {
+		// Oops; we are connected to a read-only connection, and won't be able
+		// to issue any write statements. Since RejectReadOnly is configured,
+		// we throw away this connection hoping this one would have write
+		// permission. This is specifically for a possible race condition
+		// during failover (e.g. on AWS Aurora). See README.md for more.
+		//
+		// We explicitly close the connection before returning
+		// driver.ErrBadConn to ensure that `database/sql` purges this
+		// connection and initiates a new one for next statement next time.
+		mc.Close()
+		return driver.ErrBadConn
+	}
+
+	pos := 3
+
+	// SQL State [optional: # + 5bytes string]
+	if data[3] == 0x23 { // '#' marker; NOTE(review): assumes len(data) > 3 — ERR packets appear to always carry a message, confirm
+		//sqlstate := string(data[4 : 4+5])
+		pos = 9 // skip the marker byte and the 5-byte SQL state
+	}
+
+	// Error Message [string]
+	return &MySQLError{
+		Number:  errno,
+		Message: string(data[pos:]),
+	}
+}
+
+func readStatus(b []byte) statusFlag { // decodes a little-endian uint16 server-status field
+	return statusFlag(b[0]) | statusFlag(b[1])<<8
+}
+
+// Ok Packet: updates mc.affectedRows, mc.insertId and mc.status from an OK packet
+// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet
+func (mc *mysqlConn) handleOkPacket(data []byte) error {
+	var n, m int
+
+	// 0x00 [1 byte] packet header
+
+	// Affected rows [Length Coded Binary]
+	mc.affectedRows, _, n = readLengthEncodedInteger(data[1:])
+
+	// Insert id [Length Coded Binary]
+	mc.insertId, _, m = readLengthEncodedInteger(data[1+n:])
+
+	// server_status [2 bytes]
+	mc.status = readStatus(data[1+n+m : 1+n+m+2])
+	if mc.status&statusMoreResultsExists != 0 {
+		return nil // more result sets follow; the caller is responsible for draining them
+	}
+
+	// warning count [2 bytes] — intentionally ignored
+
+	return nil
+}
+
+// Read Packets as Field Packets until EOF-Packet or an Error appears
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41
+func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
+ columns := make([]mysqlField, count)
+
+ for i := 0; ; i++ {
+ data, err := mc.readPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ // EOF Packet
+ if data[0] == iEOF && (len(data) == 5 || len(data) == 1) {
+ if i == count {
+ return columns, nil
+ }
+ return nil, fmt.Errorf("column count mismatch n:%d len:%d", count, len(columns))
+ }
+
+ // Catalog
+ pos, err := skipLengthEncodedString(data)
+ if err != nil {
+ return nil, err
+ }
+
+ // Database [len coded string]
+ n, err := skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+
+ // Table [len coded string]
+ if mc.cfg.ColumnsWithAlias {
+ tableName, _, n, err := readLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+ columns[i].tableName = string(tableName)
+ } else {
+ n, err = skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+ }
+
+ // Original table [len coded string]
+ n, err = skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+
+ // Name [len coded string]
+ name, _, n, err := readLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ columns[i].name = string(name)
+ pos += n
+
+ // Original name [len coded string]
+ n, err = skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+
+ // Filler [uint8]
+ pos++
+
+ // Charset [charset, collation uint8]
+ columns[i].charSet = data[pos]
+ pos += 2
+
+ // Length [uint32]
+ columns[i].length = binary.LittleEndian.Uint32(data[pos : pos+4])
+ pos += 4
+
+ // Field type [uint8]
+ columns[i].fieldType = fieldType(data[pos])
+ pos++
+
+ // Flags [uint16]
+ columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
+ pos += 2
+
+ // Decimals [uint8]
+ columns[i].decimals = data[pos]
+ //pos++
+
+ // Default value [len coded binary]
+ //if pos < len(data) {
+ // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:])
+ //}
+ }
+}
+
+// Read Packets as Field Packets until EOF-Packet or an Error appears
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow
+func (rows *textRows) readRow(dest []driver.Value) error {
+ mc := rows.mc
+
+ if rows.rs.done {
+ return io.EOF
+ }
+
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ // EOF Packet
+ if data[0] == iEOF && len(data) == 5 {
+ // server_status [2 bytes]
+ rows.mc.status = readStatus(data[3:])
+ rows.rs.done = true
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
+ }
+ return io.EOF
+ }
+ if data[0] == iERR {
+ rows.mc = nil
+ return mc.handleErrorPacket(data)
+ }
+
+ // RowSet Packet
+ var n int
+ var isNull bool
+ pos := 0
+
+ for i := range dest {
+ // Read bytes and convert to string
+ dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+ pos += n
+ if err == nil {
+ if !isNull {
+ if !mc.parseTime {
+ continue
+ } else {
+ switch rows.rs.columns[i].fieldType {
+ case fieldTypeTimestamp, fieldTypeDateTime,
+ fieldTypeDate, fieldTypeNewDate:
+ dest[i], err = parseDateTime(
+ string(dest[i].([]byte)),
+ mc.cfg.Loc,
+ )
+ if err == nil {
+ continue
+ }
+ default:
+ continue
+ }
+ }
+
+ } else {
+ dest[i] = nil
+ continue
+ }
+ }
+ return err // err != nil
+ }
+
+ return nil
+}
+
+// readUntilEOF discards packets until an EOF packet is seen; an ERR packet aborts with the decoded server error
+func (mc *mysqlConn) readUntilEOF() error {
+	for {
+		data, err := mc.readPacket()
+		if err != nil {
+			return err
+		}
+
+		switch data[0] {
+		case iERR:
+			return mc.handleErrorPacket(data)
+		case iEOF:
+			if len(data) == 5 { // EOF packet carrying status flags (protocol 4.1 format)
+				mc.status = readStatus(data[3:])
+			}
+			return nil
+		}
+	}
+}
+
+/******************************************************************************
+* Prepared Statements *
+******************************************************************************/
+
+// Prepare Result Packets: fills stmt.id / stmt.paramCount and returns the result-set column count
+// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html
+func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) {
+	data, err := stmt.mc.readPacket()
+	if err == nil {
+		// packet indicator [1 byte]
+		if data[0] != iOK {
+			return 0, stmt.mc.handleErrorPacket(data)
+		}
+
+		// statement id [4 bytes]
+		stmt.id = binary.LittleEndian.Uint32(data[1:5])
+
+		// Column count [16 bit uint]
+		columnCount := binary.LittleEndian.Uint16(data[5:7])
+
+		// Param count [16 bit uint]
+		stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9]))
+
+		// Reserved [8 bit] — skipped
+
+		// Warning count [16 bit uint] — intentionally ignored
+
+		return columnCount, nil
+	}
+	return 0, err
+}
+
+// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html
+func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error {
+ maxLen := stmt.mc.maxAllowedPacket - 1
+ pktLen := maxLen
+
+	// After the 4-byte packet header (bytes 0-3), the following precedes the data:
+ // 1 byte command
+ // 4 bytes stmtID
+ // 2 bytes paramID
+ const dataOffset = 1 + 4 + 2
+
+ // Cannot use the write buffer since
+ // a) the buffer is too small
+ // b) it is in use
+ data := make([]byte, 4+1+4+2+len(arg))
+
+ copy(data[4+dataOffset:], arg)
+
+ for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset {
+ if dataOffset+argLen < maxLen {
+ pktLen = dataOffset + argLen
+ }
+
+ stmt.mc.sequence = 0
+ // Add command byte [1 byte]
+ data[4] = comStmtSendLongData
+
+ // Add stmtID [32 bit]
+ data[5] = byte(stmt.id)
+ data[6] = byte(stmt.id >> 8)
+ data[7] = byte(stmt.id >> 16)
+ data[8] = byte(stmt.id >> 24)
+
+ // Add paramID [16 bit]
+ data[9] = byte(paramID)
+ data[10] = byte(paramID >> 8)
+
+ // Send CMD packet
+ err := stmt.mc.writePacket(data[:4+pktLen])
+ if err == nil {
+ data = data[pktLen-dataOffset:]
+ continue
+ }
+ return err
+
+ }
+
+ // Reset Packet Sequence
+ stmt.mc.sequence = 0
+ return nil
+}
+
+// Execute Prepared Statement
+// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html
+func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
+ if len(args) != stmt.paramCount {
+ return fmt.Errorf(
+ "argument count mismatch (got: %d; has: %d)",
+ len(args),
+ stmt.paramCount,
+ )
+ }
+
+ const minPktLen = 4 + 1 + 4 + 1 + 4
+ mc := stmt.mc
+
+	// Determine threshold dynamically to avoid packet size shortage.
+ longDataSize := mc.maxAllowedPacket / (stmt.paramCount + 1)
+ if longDataSize < 64 {
+ longDataSize = 64
+ }
+
+ // Reset packet-sequence
+ mc.sequence = 0
+
+ var data []byte
+
+ if len(args) == 0 {
+ data = mc.buf.takeBuffer(minPktLen)
+ } else {
+ data = mc.buf.takeCompleteBuffer()
+ }
+ if data == nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return errBadConnNoWrite
+ }
+
+ // command [1 byte]
+ data[4] = comStmtExecute
+
+ // statement_id [4 bytes]
+ data[5] = byte(stmt.id)
+ data[6] = byte(stmt.id >> 8)
+ data[7] = byte(stmt.id >> 16)
+ data[8] = byte(stmt.id >> 24)
+
+ // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte]
+ data[9] = 0x00
+
+ // iteration_count (uint32(1)) [4 bytes]
+ data[10] = 0x01
+ data[11] = 0x00
+ data[12] = 0x00
+ data[13] = 0x00
+
+ if len(args) > 0 {
+ pos := minPktLen
+
+ var nullMask []byte
+ if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= len(data) {
+ // buffer has to be extended but we don't know by how much so
+ // we depend on append after all data with known sizes fit.
+ // We stop at that because we deal with a lot of columns here
+ // which makes the required allocation size hard to guess.
+ tmp := make([]byte, pos+maskLen+typesLen)
+ copy(tmp[:pos], data[:pos])
+ data = tmp
+ nullMask = data[pos : pos+maskLen]
+ pos += maskLen
+ } else {
+ nullMask = data[pos : pos+maskLen]
+ for i := 0; i < maskLen; i++ {
+ nullMask[i] = 0
+ }
+ pos += maskLen
+ }
+
+ // newParameterBoundFlag 1 [1 byte]
+ data[pos] = 0x01
+ pos++
+
+ // type of each parameter [len(args)*2 bytes]
+ paramTypes := data[pos:]
+ pos += len(args) * 2
+
+ // value of each parameter [n bytes]
+ paramValues := data[pos:pos]
+ valuesCap := cap(paramValues)
+
+ for i, arg := range args {
+ // build NULL-bitmap
+ if arg == nil {
+ nullMask[i/8] |= 1 << (uint(i) & 7)
+ paramTypes[i+i] = byte(fieldTypeNULL)
+ paramTypes[i+i+1] = 0x00
+ continue
+ }
+
+ // cache types and values
+ switch v := arg.(type) {
+ case int64:
+ paramTypes[i+i] = byte(fieldTypeLongLong)
+ paramTypes[i+i+1] = 0x00
+
+ if cap(paramValues)-len(paramValues)-8 >= 0 {
+ paramValues = paramValues[:len(paramValues)+8]
+ binary.LittleEndian.PutUint64(
+ paramValues[len(paramValues)-8:],
+ uint64(v),
+ )
+ } else {
+ paramValues = append(paramValues,
+ uint64ToBytes(uint64(v))...,
+ )
+ }
+
+ case float64:
+ paramTypes[i+i] = byte(fieldTypeDouble)
+ paramTypes[i+i+1] = 0x00
+
+ if cap(paramValues)-len(paramValues)-8 >= 0 {
+ paramValues = paramValues[:len(paramValues)+8]
+ binary.LittleEndian.PutUint64(
+ paramValues[len(paramValues)-8:],
+ math.Float64bits(v),
+ )
+ } else {
+ paramValues = append(paramValues,
+ uint64ToBytes(math.Float64bits(v))...,
+ )
+ }
+
+ case bool:
+ paramTypes[i+i] = byte(fieldTypeTiny)
+ paramTypes[i+i+1] = 0x00
+
+ if v {
+ paramValues = append(paramValues, 0x01)
+ } else {
+ paramValues = append(paramValues, 0x00)
+ }
+
+ case []byte:
+ // Common case (non-nil value) first
+ if v != nil {
+ paramTypes[i+i] = byte(fieldTypeString)
+ paramTypes[i+i+1] = 0x00
+
+ if len(v) < longDataSize {
+ paramValues = appendLengthEncodedInteger(paramValues,
+ uint64(len(v)),
+ )
+ paramValues = append(paramValues, v...)
+ } else {
+ if err := stmt.writeCommandLongData(i, v); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+
+ // Handle []byte(nil) as a NULL value
+ nullMask[i/8] |= 1 << (uint(i) & 7)
+ paramTypes[i+i] = byte(fieldTypeNULL)
+ paramTypes[i+i+1] = 0x00
+
+ case string:
+ paramTypes[i+i] = byte(fieldTypeString)
+ paramTypes[i+i+1] = 0x00
+
+ if len(v) < longDataSize {
+ paramValues = appendLengthEncodedInteger(paramValues,
+ uint64(len(v)),
+ )
+ paramValues = append(paramValues, v...)
+ } else {
+ if err := stmt.writeCommandLongData(i, []byte(v)); err != nil {
+ return err
+ }
+ }
+
+ case time.Time:
+ paramTypes[i+i] = byte(fieldTypeString)
+ paramTypes[i+i+1] = 0x00
+
+ var a [64]byte
+ var b = a[:0]
+
+ if v.IsZero() {
+ b = append(b, "0000-00-00"...)
+ } else {
+ b = v.In(mc.cfg.Loc).AppendFormat(b, timeFormat)
+ }
+
+ paramValues = appendLengthEncodedInteger(paramValues,
+ uint64(len(b)),
+ )
+ paramValues = append(paramValues, b...)
+
+ default:
+ return fmt.Errorf("cannot convert type: %T", arg)
+ }
+ }
+
+ // Check if param values exceeded the available buffer
+ // In that case we must build the data packet with the new values buffer
+ if valuesCap != cap(paramValues) {
+ data = append(data[:pos], paramValues...)
+ mc.buf.buf = data
+ }
+
+ pos += len(paramValues)
+ data = data[:pos]
+ }
+
+ return mc.writePacket(data)
+}
+
+func (mc *mysqlConn) discardResults() error { // drains every pending result set while statusMoreResultsExists is set
+	for mc.status&statusMoreResultsExists != 0 {
+		resLen, err := mc.readResultSetHeaderPacket()
+		if err != nil {
+			return err
+		}
+		if resLen > 0 {
+			// columns
+			if err := mc.readUntilEOF(); err != nil {
+				return err
+			}
+			// rows
+			if err := mc.readUntilEOF(); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html
+func (rows *binaryRows) readRow(dest []driver.Value) error {
+ data, err := rows.mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ // packet indicator [1 byte]
+ if data[0] != iOK {
+ // EOF Packet
+ if data[0] == iEOF && len(data) == 5 {
+ rows.mc.status = readStatus(data[3:])
+ rows.rs.done = true
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
+ }
+ return io.EOF
+ }
+ mc := rows.mc
+ rows.mc = nil
+
+ // Error otherwise
+ return mc.handleErrorPacket(data)
+ }
+
+ // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes]
+ pos := 1 + (len(dest)+7+2)>>3
+ nullMask := data[1:pos]
+
+ for i := range dest {
+ // Field is NULL
+ // (byte >> bit-pos) % 2 == 1
+ if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 {
+ dest[i] = nil
+ continue
+ }
+
+ // Convert to byte-coded string
+ switch rows.rs.columns[i].fieldType {
+ case fieldTypeNULL:
+ dest[i] = nil
+ continue
+
+ // Numeric Types
+ case fieldTypeTiny:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i] = int64(data[pos])
+ } else {
+ dest[i] = int64(int8(data[pos]))
+ }
+ pos++
+ continue
+
+ case fieldTypeShort, fieldTypeYear:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2]))
+ } else {
+ dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2])))
+ }
+ pos += 2
+ continue
+
+ case fieldTypeInt24, fieldTypeLong:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4]))
+ } else {
+ dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4])))
+ }
+ pos += 4
+ continue
+
+ case fieldTypeLongLong:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ val := binary.LittleEndian.Uint64(data[pos : pos+8])
+ if val > math.MaxInt64 {
+ dest[i] = uint64ToString(val)
+ } else {
+ dest[i] = int64(val)
+ }
+ } else {
+ dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8]))
+ }
+ pos += 8
+ continue
+
+ case fieldTypeFloat:
+ dest[i] = math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))
+ pos += 4
+ continue
+
+ case fieldTypeDouble:
+ dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8]))
+ pos += 8
+ continue
+
+ // Length coded Binary Strings
+ case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
+ fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
+ fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
+ fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON:
+ var isNull bool
+ var n int
+ dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+ pos += n
+ if err == nil {
+ if !isNull {
+ continue
+ } else {
+ dest[i] = nil
+ continue
+ }
+ }
+ return err
+
+ case
+ fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD
+ fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal]
+ fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal]
+
+ num, isNull, n := readLengthEncodedInteger(data[pos:])
+ pos += n
+
+ switch {
+ case isNull:
+ dest[i] = nil
+ continue
+ case rows.rs.columns[i].fieldType == fieldTypeTime:
+ // database/sql does not support an equivalent to TIME, return a string
+ var dstlen uint8
+ switch decimals := rows.rs.columns[i].decimals; decimals {
+ case 0x00, 0x1f:
+ dstlen = 8
+ case 1, 2, 3, 4, 5, 6:
+ dstlen = 8 + 1 + decimals
+ default:
+ return fmt.Errorf(
+ "protocol error, illegal decimals value %d",
+ rows.rs.columns[i].decimals,
+ )
+ }
+ dest[i], err = formatBinaryTime(data[pos:pos+int(num)], dstlen)
+ case rows.mc.parseTime:
+ dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc)
+ default:
+ var dstlen uint8
+ if rows.rs.columns[i].fieldType == fieldTypeDate {
+ dstlen = 10
+ } else {
+ switch decimals := rows.rs.columns[i].decimals; decimals {
+ case 0x00, 0x1f:
+ dstlen = 19
+ case 1, 2, 3, 4, 5, 6:
+ dstlen = 19 + 1 + decimals
+ default:
+ return fmt.Errorf(
+ "protocol error, illegal decimals value %d",
+ rows.rs.columns[i].decimals,
+ )
+ }
+ }
+ dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen)
+ }
+
+ if err == nil {
+ pos += int(num)
+ continue
+ } else {
+ return err
+ }
+
+ // Please report if this happens!
+ default:
+ return fmt.Errorf("unknown field type %d", rows.rs.columns[i].fieldType)
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/go-sql-driver/mysql/result.go
new file mode 100644
index 0000000..c6438d0
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/result.go
@@ -0,0 +1,22 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+type mysqlResult struct { // driver.Result implementation
+	affectedRows int64 // affected-rows field of the last OK packet
+	insertId     int64 // insert-id field of the last OK packet
+}
+
+func (res *mysqlResult) LastInsertId() (int64, error) { // never fails
+	return res.insertId, nil
+}
+
+func (res *mysqlResult) RowsAffected() (int64, error) { // never fails
+	return res.affectedRows, nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go
new file mode 100644
index 0000000..d3b1e28
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/rows.go
@@ -0,0 +1,216 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "io"
+ "math"
+ "reflect"
+)
+
+type resultSet struct {
+ columns []mysqlField
+ columnNames []string
+ done bool
+}
+
+type mysqlRows struct {
+ mc *mysqlConn
+ rs resultSet
+ finish func()
+}
+
+type binaryRows struct {
+ mysqlRows
+}
+
+type textRows struct {
+ mysqlRows
+}
+
+func (rows *mysqlRows) Columns() []string {
+ if rows.rs.columnNames != nil {
+ return rows.rs.columnNames
+ }
+
+ columns := make([]string, len(rows.rs.columns))
+ if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias {
+ for i := range columns {
+ if tableName := rows.rs.columns[i].tableName; len(tableName) > 0 {
+ columns[i] = tableName + "." + rows.rs.columns[i].name
+ } else {
+ columns[i] = rows.rs.columns[i].name
+ }
+ }
+ } else {
+ for i := range columns {
+ columns[i] = rows.rs.columns[i].name
+ }
+ }
+
+ rows.rs.columnNames = columns
+ return columns
+}
+
+func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string {
+ return rows.rs.columns[i].typeDatabaseName()
+}
+
+// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) {
+// return int64(rows.rs.columns[i].length), true
+// }
+
+func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) {
+ return rows.rs.columns[i].flags&flagNotNULL == 0, true
+}
+
+func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) {
+ column := rows.rs.columns[i]
+ decimals := int64(column.decimals)
+
+ switch column.fieldType {
+ case fieldTypeDecimal, fieldTypeNewDecimal:
+ if decimals > 0 {
+ return int64(column.length) - 2, decimals, true
+ }
+ return int64(column.length) - 1, decimals, true
+ case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime:
+ return decimals, decimals, true
+ case fieldTypeFloat, fieldTypeDouble:
+ if decimals == 0x1f {
+ return math.MaxInt64, math.MaxInt64, true
+ }
+ return math.MaxInt64, decimals, true
+ }
+
+ return 0, 0, false
+}
+
+func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type {
+ return rows.rs.columns[i].scanType()
+}
+
+func (rows *mysqlRows) Close() (err error) { // drains unread packets so the connection can be reused by database/sql
+	if f := rows.finish; f != nil {
+		f()
+		rows.finish = nil
+	}
+
+	mc := rows.mc
+	if mc == nil {
+		return nil // already closed or fully consumed
+	}
+	if err := mc.error(); err != nil {
+		return err // connection is already broken; nothing to drain
+	}
+
+	// Remove unread packets from stream
+	if !rows.rs.done {
+		err = mc.readUntilEOF()
+	}
+	if err == nil {
+		if err = mc.discardResults(); err != nil {
+			return err
+		}
+	}
+
+	rows.mc = nil // detach: this rows object must not touch the connection again
+	return err
+}
+
+func (rows *mysqlRows) HasNextResultSet() (b bool) {
+ if rows.mc == nil {
+ return false
+ }
+ return rows.mc.status&statusMoreResultsExists != 0
+}
+
+func (rows *mysqlRows) nextResultSet() (int, error) { // advances to the next result set; returns its column count
+	if rows.mc == nil {
+		return 0, io.EOF
+	}
+	if err := rows.mc.error(); err != nil {
+		return 0, err
+	}
+
+	// Remove unread packets from stream
+	if !rows.rs.done {
+		if err := rows.mc.readUntilEOF(); err != nil {
+			return 0, err
+		}
+		rows.rs.done = true
+	}
+
+	if !rows.HasNextResultSet() {
+		rows.mc = nil // no further result sets: release the connection reference
+		return 0, io.EOF
+	}
+	rows.rs = resultSet{} // reset per-result-set state before reading the next header
+	return rows.mc.readResultSetHeaderPacket()
+}
+
+func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) {
+ for {
+ resLen, err := rows.nextResultSet()
+ if err != nil {
+ return 0, err
+ }
+
+ if resLen > 0 {
+ return resLen, nil
+ }
+
+ rows.rs.done = true
+ }
+}
+
+func (rows *binaryRows) NextResultSet() error {
+ resLen, err := rows.nextNotEmptyResultSet()
+ if err != nil {
+ return err
+ }
+
+ rows.rs.columns, err = rows.mc.readColumns(resLen)
+ return err
+}
+
+func (rows *binaryRows) Next(dest []driver.Value) error {
+ if mc := rows.mc; mc != nil {
+ if err := mc.error(); err != nil {
+ return err
+ }
+
+ // Fetch next row from stream
+ return rows.readRow(dest)
+ }
+ return io.EOF
+}
+
+func (rows *textRows) NextResultSet() (err error) {
+ resLen, err := rows.nextNotEmptyResultSet()
+ if err != nil {
+ return err
+ }
+
+ rows.rs.columns, err = rows.mc.readColumns(resLen)
+ return err
+}
+
+func (rows *textRows) Next(dest []driver.Value) error {
+ if mc := rows.mc; mc != nil {
+ if err := mc.error(); err != nil {
+ return err
+ }
+
+ // Fetch next row from stream
+ return rows.readRow(dest)
+ }
+ return io.EOF
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go
new file mode 100644
index 0000000..ce7fe4c
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/statement.go
@@ -0,0 +1,211 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+)
+
+type mysqlStmt struct {
+ mc *mysqlConn
+ id uint32
+ paramCount int
+}
+
+func (stmt *mysqlStmt) Close() error { // sends COM_STMT_CLOSE and detaches the statement from its connection
+	if stmt.mc == nil || stmt.mc.closed.IsSet() {
+		// driver.Stmt.Close can be called more than once, thus this function
+		// has to be idempotent.
+		// See also Issue #450 and golang/go#16019.
+		//errLog.Print(ErrInvalidConn)
+		return driver.ErrBadConn
+	}
+
+	err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id)
+	stmt.mc = nil // subsequent Close calls take the idempotent branch above
+	return err
+}
+
+func (stmt *mysqlStmt) NumInput() int {
+ return stmt.paramCount
+}
+
+func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
+ return converter{}
+}
+
+func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
+ if stmt.mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ // Send command
+ err := stmt.writeExecutePacket(args)
+ if err != nil {
+ return nil, stmt.mc.markBadConn(err)
+ }
+
+ mc := stmt.mc
+
+ mc.affectedRows = 0
+ mc.insertId = 0
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ if resLen > 0 {
+ // Columns
+ if err = mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+
+ // Rows
+ if err := mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := mc.discardResults(); err != nil {
+ return nil, err
+ }
+
+ return &mysqlResult{
+ affectedRows: int64(mc.affectedRows),
+ insertId: int64(mc.insertId),
+ }, nil
+}
+
+func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
+ return stmt.query(args)
+}
+
+func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
+ if stmt.mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ // Send command
+ err := stmt.writeExecutePacket(args)
+ if err != nil {
+ return nil, stmt.mc.markBadConn(err)
+ }
+
+ mc := stmt.mc
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ rows := new(binaryRows)
+
+ if resLen > 0 {
+ rows.mc = mc
+ rows.rs.columns, err = mc.readColumns(resLen)
+ } else {
+ rows.rs.done = true
+
+ switch err := rows.NextResultSet(); err {
+ case nil, io.EOF:
+ return rows, nil
+ default:
+ return nil, err
+ }
+ }
+
+ return rows, err
+}
+
+type converter struct{}
+
+// ConvertValue mirrors the reference/default converter in database/sql/driver
+// with _one_ exception. We support uint64 with their high bit and the default
+// implementation does not. This function should be kept in sync with
+// database/sql/driver defaultConverter.ConvertValue() except for that
+// deliberate difference.
+func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
+ if driver.IsValue(v) {
+ return v, nil
+ }
+
+ if vr, ok := v.(driver.Valuer); ok {
+ sv, err := callValuerValue(vr)
+ if err != nil {
+ return nil, err
+ }
+ if !driver.IsValue(sv) {
+ return nil, fmt.Errorf("non-Value type %T returned from Value", sv)
+ }
+ return sv, nil
+ }
+
+ rv := reflect.ValueOf(v)
+ switch rv.Kind() {
+ case reflect.Ptr:
+ // indirect pointers
+ if rv.IsNil() {
+ return nil, nil
+ } else {
+ return c.ConvertValue(rv.Elem().Interface())
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return rv.Int(), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
+ return int64(rv.Uint()), nil
+ case reflect.Uint64:
+ u64 := rv.Uint()
+ if u64 >= 1<<63 {
+ return strconv.FormatUint(u64, 10), nil
+ }
+ return int64(u64), nil
+ case reflect.Float32, reflect.Float64:
+ return rv.Float(), nil
+ case reflect.Bool:
+ return rv.Bool(), nil
+ case reflect.Slice:
+ ek := rv.Type().Elem().Kind()
+ if ek == reflect.Uint8 {
+ return rv.Bytes(), nil
+ }
+ return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, ek)
+ case reflect.String:
+ return rv.String(), nil
+ }
+ return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
+}
+
+var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
+
+// callValuerValue returns vr.Value(), with one exception:
+// If vr.Value is an auto-generated method on a pointer type and the
+// pointer is nil, it would panic at runtime in the panicwrap
+// method. Treat it like nil instead.
+//
+// This is so people can implement driver.Value on value types and
+// still use nil pointers to those types to mean nil/NULL, just like
+// string/*string.
+//
+// This is an exact copy of the same-named unexported function from the
+// database/sql package.
+func callValuerValue(vr driver.Valuer) (v driver.Value, err error) {
+ if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr &&
+ rv.IsNil() &&
+ rv.Type().Elem().Implements(valuerReflectType) {
+ return nil, nil
+ }
+ return vr.Value()
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go
new file mode 100644
index 0000000..417d727
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/transaction.go
@@ -0,0 +1,31 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+type mysqlTx struct { // driver.Tx implementation
+	mc *mysqlConn // set to nil once the transaction has been committed or rolled back
+}
+
+func (tx *mysqlTx) Commit() (err error) {
+	if tx.mc == nil || tx.mc.closed.IsSet() {
+		return ErrInvalidConn // transaction already finished or connection gone
+	}
+	err = tx.mc.exec("COMMIT")
+	tx.mc = nil // a tx must not be reused after Commit
+	return
+}
+
+func (tx *mysqlTx) Rollback() (err error) {
+	if tx.mc == nil || tx.mc.closed.IsSet() {
+		return ErrInvalidConn // transaction already finished or connection gone
+	}
+	err = tx.mc.exec("ROLLBACK")
+	tx.mc = nil // a tx must not be reused after Rollback
+	return
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go
new file mode 100644
index 0000000..ca5d47d
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/utils.go
@@ -0,0 +1,726 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "crypto/tls"
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// Registry for custom tls.Configs
+var (
+ tlsConfigLock sync.RWMutex
+ tlsConfigRegistry map[string]*tls.Config
+)
+
+// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
+// Use the key as a value in the DSN where tls=value.
+//
+// Note: The provided tls.Config is exclusively owned by the driver after
+// registering it.
+//
+// rootCertPool := x509.NewCertPool()
+// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
+// if err != nil {
+// log.Fatal(err)
+// }
+// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
+// log.Fatal("Failed to append PEM.")
+// }
+// clientCert := make([]tls.Certificate, 0, 1)
+// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem")
+// if err != nil {
+// log.Fatal(err)
+// }
+// clientCert = append(clientCert, certs)
+// mysql.RegisterTLSConfig("custom", &tls.Config{
+// RootCAs: rootCertPool,
+// Certificates: clientCert,
+// })
+// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom")
+//
+func RegisterTLSConfig(key string, config *tls.Config) error {
+ if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" {
+ return fmt.Errorf("key '%s' is reserved", key)
+ }
+
+ tlsConfigLock.Lock()
+ if tlsConfigRegistry == nil {
+ tlsConfigRegistry = make(map[string]*tls.Config)
+ }
+
+ tlsConfigRegistry[key] = config
+ tlsConfigLock.Unlock()
+ return nil
+}
+
+// DeregisterTLSConfig removes the tls.Config associated with key.
+func DeregisterTLSConfig(key string) {
+ tlsConfigLock.Lock()
+ if tlsConfigRegistry != nil {
+ delete(tlsConfigRegistry, key)
+ }
+ tlsConfigLock.Unlock()
+}
+
+func getTLSConfigClone(key string) (config *tls.Config) {
+ tlsConfigLock.RLock()
+ if v, ok := tlsConfigRegistry[key]; ok {
+ config = cloneTLSConfig(v)
+ }
+ tlsConfigLock.RUnlock()
+ return
+}
+
+// Returns the bool value of the input.
+// The 2nd return value indicates if the input was a valid bool value
+func readBool(input string) (value bool, valid bool) {
+ switch input {
+ case "1", "true", "TRUE", "True":
+ return true, true
+ case "0", "false", "FALSE", "False":
+ return false, true
+ }
+
+ // Not a valid bool value
+ return
+}
+
+/******************************************************************************
+* Time related utils *
+******************************************************************************/
+
+// NullTime represents a time.Time that may be NULL.
+// NullTime implements the Scanner interface so
+// it can be used as a scan destination:
+//
+// var nt NullTime
+// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt)
+// ...
+// if nt.Valid {
+// // use nt.Time
+// } else {
+// // NULL value
+// }
+//
+// This NullTime implementation is not driver-specific
+type NullTime struct {
+ Time time.Time
+ Valid bool // Valid is true if Time is not NULL
+}
+
+// Scan implements the Scanner interface.
+// The value type must be time.Time or string / []byte (formatted time-string),
+// otherwise Scan fails.
+func (nt *NullTime) Scan(value interface{}) (err error) {
+ if value == nil {
+ nt.Time, nt.Valid = time.Time{}, false
+ return
+ }
+
+ switch v := value.(type) {
+ case time.Time:
+ nt.Time, nt.Valid = v, true
+ return
+ case []byte:
+ nt.Time, err = parseDateTime(string(v), time.UTC)
+ nt.Valid = (err == nil)
+ return
+ case string:
+ nt.Time, err = parseDateTime(v, time.UTC)
+ nt.Valid = (err == nil)
+ return
+ }
+
+ nt.Valid = false
+ return fmt.Errorf("Can't convert %T to time.Time", value)
+}
+
+// Value implements the driver Valuer interface.
+func (nt NullTime) Value() (driver.Value, error) {
+ if !nt.Valid {
+ return nil, nil
+ }
+ return nt.Time, nil
+}
+
+func parseDateTime(str string, loc *time.Location) (t time.Time, err error) {
+ base := "0000-00-00 00:00:00.0000000"
+ switch len(str) {
+ case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM"
+ if str == base[:len(str)] {
+ return
+ }
+ t, err = time.Parse(timeFormat[:len(str)], str)
+ default:
+ err = fmt.Errorf("invalid time string: %s", str)
+ return
+ }
+
+ // Adjust location
+ if err == nil && loc != time.UTC {
+ y, mo, d := t.Date()
+ h, mi, s := t.Clock()
+ t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil
+ }
+
+ return
+}
+
+func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) {
+ switch num {
+ case 0:
+ return time.Time{}, nil
+ case 4:
+ return time.Date(
+ int(binary.LittleEndian.Uint16(data[:2])), // year
+ time.Month(data[2]), // month
+ int(data[3]), // day
+ 0, 0, 0, 0,
+ loc,
+ ), nil
+ case 7:
+ return time.Date(
+ int(binary.LittleEndian.Uint16(data[:2])), // year
+ time.Month(data[2]), // month
+ int(data[3]), // day
+ int(data[4]), // hour
+ int(data[5]), // minutes
+ int(data[6]), // seconds
+ 0,
+ loc,
+ ), nil
+ case 11:
+ return time.Date(
+ int(binary.LittleEndian.Uint16(data[:2])), // year
+ time.Month(data[2]), // month
+ int(data[3]), // day
+ int(data[4]), // hour
+ int(data[5]), // minutes
+ int(data[6]), // seconds
+ int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds
+ loc,
+ ), nil
+ }
+ return nil, fmt.Errorf("invalid DATETIME packet length %d", num)
+}
+
+// zeroDateTime is used in formatBinaryDateTime to avoid an allocation
+// if the DATE or DATETIME has the zero value.
+// It must never be changed.
+// The current behavior depends on database/sql copying the result.
+var zeroDateTime = []byte("0000-00-00 00:00:00.000000")
+
+const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
+const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
+
+func appendMicrosecs(dst, src []byte, decimals int) []byte {
+ if decimals <= 0 {
+ return dst
+ }
+ if len(src) == 0 {
+ return append(dst, ".000000"[:decimals+1]...)
+ }
+
+ microsecs := binary.LittleEndian.Uint32(src[:4])
+ p1 := byte(microsecs / 10000)
+ microsecs -= 10000 * uint32(p1)
+ p2 := byte(microsecs / 100)
+ microsecs -= 100 * uint32(p2)
+ p3 := byte(microsecs)
+
+ switch decimals {
+ default:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ digits10[p3], digits01[p3],
+ )
+ case 1:
+ return append(dst, '.',
+ digits10[p1],
+ )
+ case 2:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ )
+ case 3:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2],
+ )
+ case 4:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ )
+ case 5:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ digits10[p3],
+ )
+ }
+}
+
+func formatBinaryDateTime(src []byte, length uint8) (driver.Value, error) {
+ // length expects the deterministic length of the zero value,
+ // negative time and 100+ hours are automatically added if needed
+ if len(src) == 0 {
+ return zeroDateTime[:length], nil
+ }
+ var dst []byte // return value
+ var p1, p2, p3 byte // current digit pair
+
+ switch length {
+ case 10, 19, 21, 22, 23, 24, 25, 26:
+ default:
+ t := "DATE"
+ if length > 10 {
+ t += "TIME"
+ }
+ return nil, fmt.Errorf("illegal %s length %d", t, length)
+ }
+ switch len(src) {
+ case 4, 7, 11:
+ default:
+ t := "DATE"
+ if length > 10 {
+ t += "TIME"
+ }
+ return nil, fmt.Errorf("illegal %s packet length %d", t, len(src))
+ }
+ dst = make([]byte, 0, length)
+ // start with the date
+ year := binary.LittleEndian.Uint16(src[:2])
+ pt := year / 100
+ p1 = byte(year - 100*uint16(pt))
+ p2, p3 = src[2], src[3]
+ dst = append(dst,
+ digits10[pt], digits01[pt],
+ digits10[p1], digits01[p1], '-',
+ digits10[p2], digits01[p2], '-',
+ digits10[p3], digits01[p3],
+ )
+ if length == 10 {
+ return dst, nil
+ }
+ if len(src) == 4 {
+ return append(dst, zeroDateTime[10:length]...), nil
+ }
+ dst = append(dst, ' ')
+ p1 = src[4] // hour
+ src = src[5:]
+
+ // p1 is 2-digit hour, src is after hour
+ p2, p3 = src[0], src[1]
+ dst = append(dst,
+ digits10[p1], digits01[p1], ':',
+ digits10[p2], digits01[p2], ':',
+ digits10[p3], digits01[p3],
+ )
+ return appendMicrosecs(dst, src[2:], int(length)-20), nil
+}
+
+func formatBinaryTime(src []byte, length uint8) (driver.Value, error) {
+ // length expects the deterministic length of the zero value,
+ // negative time and 100+ hours are automatically added if needed
+ if len(src) == 0 {
+ return zeroDateTime[11 : 11+length], nil
+ }
+ var dst []byte // return value
+
+ switch length {
+ case
+ 8, // time (can be up to 10 when negative and 100+ hours)
+ 10, 11, 12, 13, 14, 15: // time with fractional seconds
+ default:
+ return nil, fmt.Errorf("illegal TIME length %d", length)
+ }
+ switch len(src) {
+ case 8, 12:
+ default:
+ return nil, fmt.Errorf("invalid TIME packet length %d", len(src))
+ }
+ // +2 to enable negative time and 100+ hours
+ dst = make([]byte, 0, length+2)
+ if src[0] == 1 {
+ dst = append(dst, '-')
+ }
+ days := binary.LittleEndian.Uint32(src[1:5])
+ hours := int64(days)*24 + int64(src[5])
+
+ if hours >= 100 {
+ dst = strconv.AppendInt(dst, hours, 10)
+ } else {
+ dst = append(dst, digits10[hours], digits01[hours])
+ }
+
+ min, sec := src[6], src[7]
+ dst = append(dst, ':',
+ digits10[min], digits01[min], ':',
+ digits10[sec], digits01[sec],
+ )
+ return appendMicrosecs(dst, src[8:], int(length)-9), nil
+}
+
+/******************************************************************************
+* Convert from and to bytes *
+******************************************************************************/
+
+func uint64ToBytes(n uint64) []byte {
+ return []byte{
+ byte(n),
+ byte(n >> 8),
+ byte(n >> 16),
+ byte(n >> 24),
+ byte(n >> 32),
+ byte(n >> 40),
+ byte(n >> 48),
+ byte(n >> 56),
+ }
+}
+
+func uint64ToString(n uint64) []byte {
+ var a [20]byte
+ i := 20
+
+ // U+0030 = 0
+ // ...
+ // U+0039 = 9
+
+ var q uint64
+ for n >= 10 {
+ i--
+ q = n / 10
+ a[i] = uint8(n-q*10) + 0x30
+ n = q
+ }
+
+ i--
+ a[i] = uint8(n) + 0x30
+
+ return a[i:]
+}
+
+// treats string value as unsigned integer representation
+func stringToInt(b []byte) int {
+ val := 0
+ for i := range b {
+ val *= 10
+ val += int(b[i] - 0x30)
+ }
+ return val
+}
+
+// returns the string read as a bytes slice, whether the value is NULL,
+// the number of bytes read and an error, in case the string is longer than
+// the input slice
+func readLengthEncodedString(b []byte) ([]byte, bool, int, error) {
+ // Get length
+ num, isNull, n := readLengthEncodedInteger(b)
+ if num < 1 {
+ return b[n:n], isNull, n, nil
+ }
+
+ n += int(num)
+
+ // Check data length
+ if len(b) >= n {
+ return b[n-int(num) : n : n], false, n, nil
+ }
+ return nil, false, n, io.EOF
+}
+
+// returns the number of bytes skipped and an error, in case the string is
+// longer than the input slice
+func skipLengthEncodedString(b []byte) (int, error) {
+ // Get length
+ num, _, n := readLengthEncodedInteger(b)
+ if num < 1 {
+ return n, nil
+ }
+
+ n += int(num)
+
+ // Check data length
+ if len(b) >= n {
+ return n, nil
+ }
+ return n, io.EOF
+}
+
+// returns the number read, whether the value is NULL and the number of bytes read
+func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
+ // See issue #349
+ if len(b) == 0 {
+ return 0, true, 1
+ }
+
+ switch b[0] {
+ // 251: NULL
+ case 0xfb:
+ return 0, true, 1
+
+ // 252: value of following 2
+ case 0xfc:
+ return uint64(b[1]) | uint64(b[2])<<8, false, 3
+
+ // 253: value of following 3
+ case 0xfd:
+ return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
+
+ // 254: value of following 8
+ case 0xfe:
+ return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
+ uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
+ uint64(b[7])<<48 | uint64(b[8])<<56,
+ false, 9
+ }
+
+ // 0-250: value of first byte
+ return uint64(b[0]), false, 1
+}
+
+// encodes a uint64 value and appends it to the given bytes slice
+func appendLengthEncodedInteger(b []byte, n uint64) []byte {
+ switch {
+ case n <= 250:
+ return append(b, byte(n))
+
+ case n <= 0xffff:
+ return append(b, 0xfc, byte(n), byte(n>>8))
+
+ case n <= 0xffffff:
+ return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
+ }
+ return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
+ byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
+}
+
+// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize.
+// If cap(buf) is not enough, reallocate new buffer.
+func reserveBuffer(buf []byte, appendSize int) []byte {
+ newSize := len(buf) + appendSize
+ if cap(buf) < newSize {
+ // Grow buffer exponentially
+ newBuf := make([]byte, len(buf)*2+appendSize)
+ copy(newBuf, buf)
+ buf = newBuf
+ }
+ return buf[:newSize]
+}
+
+// escapeBytesBackslash escapes []byte with backslashes (\)
+// This escapes the contents of a string (provided as []byte) by adding backslashes before special
+// characters, and turning others into specific escape sequences, such as
+// turning newlines into \n and null bytes into \0.
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932
+func escapeBytesBackslash(buf, v []byte) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for _, c := range v {
+ switch c {
+ case '\x00':
+ buf[pos] = '\\'
+ buf[pos+1] = '0'
+ pos += 2
+ case '\n':
+ buf[pos] = '\\'
+ buf[pos+1] = 'n'
+ pos += 2
+ case '\r':
+ buf[pos] = '\\'
+ buf[pos+1] = 'r'
+ pos += 2
+ case '\x1a':
+ buf[pos] = '\\'
+ buf[pos+1] = 'Z'
+ pos += 2
+ case '\'':
+ buf[pos] = '\\'
+ buf[pos+1] = '\''
+ pos += 2
+ case '"':
+ buf[pos] = '\\'
+ buf[pos+1] = '"'
+ pos += 2
+ case '\\':
+ buf[pos] = '\\'
+ buf[pos+1] = '\\'
+ pos += 2
+ default:
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
+
+// escapeStringBackslash is similar to escapeBytesBackslash but for string.
+func escapeStringBackslash(buf []byte, v string) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for i := 0; i < len(v); i++ {
+ c := v[i]
+ switch c {
+ case '\x00':
+ buf[pos] = '\\'
+ buf[pos+1] = '0'
+ pos += 2
+ case '\n':
+ buf[pos] = '\\'
+ buf[pos+1] = 'n'
+ pos += 2
+ case '\r':
+ buf[pos] = '\\'
+ buf[pos+1] = 'r'
+ pos += 2
+ case '\x1a':
+ buf[pos] = '\\'
+ buf[pos+1] = 'Z'
+ pos += 2
+ case '\'':
+ buf[pos] = '\\'
+ buf[pos+1] = '\''
+ pos += 2
+ case '"':
+ buf[pos] = '\\'
+ buf[pos+1] = '"'
+ pos += 2
+ case '\\':
+ buf[pos] = '\\'
+ buf[pos+1] = '\\'
+ pos += 2
+ default:
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
+
+// escapeBytesQuotes escapes apostrophes in []byte by doubling them up.
+// This escapes the contents of a string by doubling up any apostrophes that
+// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in
+// effect on the server.
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038
+func escapeBytesQuotes(buf, v []byte) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for _, c := range v {
+ if c == '\'' {
+ buf[pos] = '\''
+ buf[pos+1] = '\''
+ pos += 2
+ } else {
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
+
+// escapeStringQuotes is similar to escapeBytesQuotes but for string.
+func escapeStringQuotes(buf []byte, v string) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for i := 0; i < len(v); i++ {
+ c := v[i]
+ if c == '\'' {
+ buf[pos] = '\''
+ buf[pos+1] = '\''
+ pos += 2
+ } else {
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
+
+/******************************************************************************
+* Sync utils *
+******************************************************************************/
+
+// noCopy may be embedded into structs which must not be copied
+// after the first use.
+//
+// See https://github.com/golang/go/issues/8005#issuecomment-190753527
+// for details.
+type noCopy struct{}
+
+// Lock is a no-op used by -copylocks checker from `go vet`.
+func (*noCopy) Lock() {}
+
+// atomicBool is a wrapper around uint32 for usage as a boolean value with
+// atomic access.
+type atomicBool struct {
+ _noCopy noCopy
+ value uint32
+}
+
+// IsSet returns whether the current boolean value is true
+func (ab *atomicBool) IsSet() bool {
+ return atomic.LoadUint32(&ab.value) > 0
+}
+
+// Set sets the value of the bool regardless of the previous value
+func (ab *atomicBool) Set(value bool) {
+ if value {
+ atomic.StoreUint32(&ab.value, 1)
+ } else {
+ atomic.StoreUint32(&ab.value, 0)
+ }
+}
+
+// TrySet sets the value of the bool and returns whether the value changed
+func (ab *atomicBool) TrySet(value bool) bool {
+ if value {
+ return atomic.SwapUint32(&ab.value, 1) == 0
+ }
+ return atomic.SwapUint32(&ab.value, 0) > 0
+}
+
+// atomicError is a wrapper for atomically accessed error values
+type atomicError struct {
+ _noCopy noCopy
+ value atomic.Value
+}
+
+// Set sets the error value regardless of the previous value.
+// The value must not be nil
+func (ae *atomicError) Set(value error) {
+ ae.value.Store(value)
+}
+
+// Value returns the current error value
+func (ae *atomicError) Value() error {
+ if v := ae.value.Load(); v != nil {
+ // this will panic if the value doesn't implement the error interface
+ return v.(error)
+ }
+ return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils_go17.go b/vendor/github.com/go-sql-driver/mysql/utils_go17.go
new file mode 100644
index 0000000..f595634
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/utils_go17.go
@@ -0,0 +1,40 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.7
+// +build !go1.8
+
+package mysql
+
+import "crypto/tls"
+
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+ return &tls.Config{
+ Rand: c.Rand,
+ Time: c.Time,
+ Certificates: c.Certificates,
+ NameToCertificate: c.NameToCertificate,
+ GetCertificate: c.GetCertificate,
+ RootCAs: c.RootCAs,
+ NextProtos: c.NextProtos,
+ ServerName: c.ServerName,
+ ClientAuth: c.ClientAuth,
+ ClientCAs: c.ClientCAs,
+ InsecureSkipVerify: c.InsecureSkipVerify,
+ CipherSuites: c.CipherSuites,
+ PreferServerCipherSuites: c.PreferServerCipherSuites,
+ SessionTicketsDisabled: c.SessionTicketsDisabled,
+ SessionTicketKey: c.SessionTicketKey,
+ ClientSessionCache: c.ClientSessionCache,
+ MinVersion: c.MinVersion,
+ MaxVersion: c.MaxVersion,
+ CurvePreferences: c.CurvePreferences,
+ DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
+ Renegotiation: c.Renegotiation,
+ }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils_go18.go b/vendor/github.com/go-sql-driver/mysql/utils_go18.go
new file mode 100644
index 0000000..c35c2a6
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/utils_go18.go
@@ -0,0 +1,50 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.8
+
+package mysql
+
+import (
+ "crypto/tls"
+ "database/sql"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+)
+
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+ return c.Clone()
+}
+
+func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
+ dargs := make([]driver.Value, len(named))
+ for n, param := range named {
+ if len(param.Name) > 0 {
+ // TODO: support the use of Named Parameters #561
+ return nil, errors.New("mysql: driver does not support the use of Named Parameters")
+ }
+ dargs[n] = param.Value
+ }
+ return dargs, nil
+}
+
+func mapIsolationLevel(level driver.IsolationLevel) (string, error) {
+ switch sql.IsolationLevel(level) {
+ case sql.LevelRepeatableRead:
+ return "REPEATABLE READ", nil
+ case sql.LevelReadCommitted:
+ return "READ COMMITTED", nil
+ case sql.LevelReadUncommitted:
+ return "READ UNCOMMITTED", nil
+ case sql.LevelSerializable:
+ return "SERIALIZABLE", nil
+ default:
+ return "", fmt.Errorf("mysql: unsupported isolation level: %v", level)
+ }
+}
diff --git a/vendor/github.com/go-xorm/xorm/.drone.yml b/vendor/github.com/go-xorm/xorm/.drone.yml
new file mode 100644
index 0000000..be97845
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/.drone.yml
@@ -0,0 +1,124 @@
+workspace:
+ base: /go
+ path: src/github.com/go-xorm/xorm
+
+clone:
+ git:
+ image: plugins/git:next
+ depth: 50
+ tags: true
+
+services:
+ mysql:
+ image: mysql:5.7
+ environment:
+ - MYSQL_DATABASE=xorm_test
+ - MYSQL_ALLOW_EMPTY_PASSWORD=yes
+ when:
+ event: [ push, tag, pull_request ]
+
+ pgsql:
+ image: postgres:9.5
+ environment:
+ - POSTGRES_USER=postgres
+ - POSTGRES_DB=xorm_test
+ when:
+ event: [ push, tag, pull_request ]
+
+ #mssql:
+ # image: microsoft/mssql-server-linux:2017-CU11
+ # environment:
+ # - ACCEPT_EULA=Y
+ # - SA_PASSWORD=yourStrong(!)Password
+ # - MSSQL_PID=Developer
+ # commands:
+ # - echo 'CREATE DATABASE xorm_test' > create.sql
+ # - /opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P yourStrong(!)Password -i "create.sql"
+
+matrix:
+ GO_VERSION:
+ - 1.10
+ - 1.11
+ - 1.12
+
+pipeline:
+ init_postgres:
+ image: postgres:9.5
+ commands:
+ # wait for postgres service to become available
+ - |
+ until psql -U postgres -d xorm_test -h pgsql \
+ -c "SELECT 1;" >/dev/null 2>&1; do sleep 1; done
+ # query the database
+ - |
+ psql -U postgres -d xorm_test -h pgsql \
+ -c "create schema xorm;"
+
+ build:
+ image: golang:${GO_VERSION}
+ commands:
+ - go get -t -d -v ./...
+ - go get -u xorm.io/core
+ - go get -u xorm.io/builder
+ - go build -v
+ when:
+ event: [ push, pull_request ]
+
+ test-sqlite:
+ image: golang:${GO_VERSION}
+ commands:
+ - go get -u github.com/wadey/gocovmerge
+ - go test -v -race -db="sqlite3" -conn_str="./test.db" -coverprofile=coverage1-1.txt -covermode=atomic
+ - go test -v -race -db="sqlite3" -conn_str="./test.db" -cache=true -coverprofile=coverage1-2.txt -covermode=atomic
+ when:
+ event: [ push, pull_request ]
+
+ test-mysql:
+ image: golang:${GO_VERSION}
+ commands:
+ - go test -v -race -db="mysql" -conn_str="root:@tcp(mysql)/xorm_test" -coverprofile=coverage2-1.txt -covermode=atomic
+ - go test -v -race -db="mysql" -conn_str="root:@tcp(mysql)/xorm_test" -cache=true -coverprofile=coverage2-2.txt -covermode=atomic
+ when:
+ event: [ push, pull_request ]
+
+ test-mysql-utf8mb4:
+ image: golang:${GO_VERSION}
+ commands:
+ - go test -v -race -db="mysql" -conn_str="root:@tcp(mysql)/xorm_test?charset=utf8mb4" -coverprofile=coverage2.1-1.txt -covermode=atomic
+ - go test -v -race -db="mysql" -conn_str="root:@tcp(mysql)/xorm_test?charset=utf8mb4" -cache=true -coverprofile=coverage2.1-2.txt -covermode=atomic
+ when:
+ event: [ push, pull_request ]
+
+ test-mymysql:
+ image: golang:${GO_VERSION}
+ commands:
+ - go test -v -race -db="mymysql" -conn_str="tcp:mysql:3306*xorm_test/root/" -coverprofile=coverage3-1.txt -covermode=atomic
+ - go test -v -race -db="mymysql" -conn_str="tcp:mysql:3306*xorm_test/root/" -cache=true -coverprofile=coverage3-2.txt -covermode=atomic
+ when:
+ event: [ push, pull_request ]
+
+ test-postgres:
+ image: golang:${GO_VERSION}
+ commands:
+ - go test -v -race -db="postgres" -conn_str="postgres://postgres:@pgsql/xorm_test?sslmode=disable" -coverprofile=coverage4-1.txt -covermode=atomic
+ - go test -v -race -db="postgres" -conn_str="postgres://postgres:@pgsql/xorm_test?sslmode=disable" -cache=true -coverprofile=coverage4-2.txt -covermode=atomic
+ when:
+ event: [ push, pull_request ]
+
+ test-postgres-schema:
+ image: golang:${GO_VERSION}
+ commands:
+ - go test -v -race -db="postgres" -conn_str="postgres://postgres:@pgsql/xorm_test?sslmode=disable" -schema=xorm -coverprofile=coverage5-1.txt -covermode=atomic
+ - go test -v -race -db="postgres" -conn_str="postgres://postgres:@pgsql/xorm_test?sslmode=disable" -schema=xorm -cache=true -coverprofile=coverage5-2.txt -covermode=atomic
+ - gocovmerge coverage1-1.txt coverage1-2.txt coverage2-1.txt coverage2-2.txt coverage2.1-1.txt coverage2.1-2.txt coverage3-1.txt coverage3-2.txt coverage4-1.txt coverage4-2.txt coverage5-1.txt coverage5-2.txt > coverage.txt
+ when:
+ event: [ push, pull_request ]
+
+ #coverage:
+ # image: robertstettner/drone-codecov
+ # secrets: [ codecov_token ]
+ # files:
+ # - coverage.txt
+ # when:
+ # event: [ push, pull_request ]
+ # branch: [ master ]
\ No newline at end of file
diff --git a/vendor/github.com/go-xorm/xorm/.gitignore b/vendor/github.com/go-xorm/xorm/.gitignore
new file mode 100644
index 0000000..f1757b9
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/.gitignore
@@ -0,0 +1,33 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+*.db
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+*.log
+.vendor
+temp_test.go
+.vscode
+xorm.test
+*.sqlite3
+test.db.sql
+
+.idea/
diff --git a/vendor/github.com/go-xorm/xorm/CONTRIBUTING.md b/vendor/github.com/go-xorm/xorm/CONTRIBUTING.md
new file mode 100644
index 0000000..37f4bc5
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/CONTRIBUTING.md
@@ -0,0 +1,46 @@
+## Contributing to xorm
+
+`xorm` has a backlog of [pull requests](https://help.github.com/articles/using-pull-requests), but contributions are still very
+much welcome. You can help with patch review, submitting bug reports,
+or adding new functionality. There is no formal style guide, but
+please conform to the style of existing code and general Go formatting
+conventions when submitting patches.
+
+* [fork a repo](https://help.github.com/articles/fork-a-repo)
+* [creating a pull request ](https://help.github.com/articles/creating-a-pull-request)
+
+### Language
+
+Since `xorm` is a world-wide open source project, please describe your issues or code changes in English as soon as possible.
+
+### Sign your codes with comments
+```
+// !! your comments
+
+e.g.,
+
+// !lunny! this is comments made by lunny
+```
+
+### Patch review
+
+Help review existing open [pull requests](https://help.github.com/articles/using-pull-requests) by commenting on the code or
+proposed functionality.
+
+### Bug reports
+
+We appreciate any bug reports, but especially ones with self-contained
+(doesn't depend on code outside of xorm), minimal (can't be simplified
+further) test cases. It's especially helpful if you can submit a pull
+request with just the failing test case (you can find some example test file like [session_get_test.go](https://github.com/go-xorm/xorm/blob/master/session_get_test.go)).
+
+If you implement a new database interface, you may need to add a test_.sh file.
+For example, [mysql_test.go](https://github.com/go-xorm/xorm/blob/master/test_mysql.sh)
+
+### New functionality
+
+There are a number of pending patches for new functionality, so
+additional feature patches will take a while to merge. Still, patches
+are generally reviewed based on usefulness and complexity in addition
+to time-in-queue, so if you have a knockout idea, take a shot. Feel
+free to open an issue discussing your proposed patch beforehand.
diff --git a/vendor/github.com/go-xorm/xorm/LICENSE b/vendor/github.com/go-xorm/xorm/LICENSE
new file mode 100644
index 0000000..84d2ae5
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013 - 2015 The Xorm Authors
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of the {organization} nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/go-xorm/xorm/README.md b/vendor/github.com/go-xorm/xorm/README.md
new file mode 100644
index 0000000..62b40ba
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/README.md
@@ -0,0 +1,503 @@
+# xorm
+
+[中文](https://github.com/go-xorm/xorm/blob/master/README_CN.md)
+
+Xorm is a simple and powerful ORM for Go.
+
+[![CircleCI](https://circleci.com/gh/go-xorm/xorm.svg?style=shield)](https://circleci.com/gh/go-xorm/xorm) [![codecov](https://codecov.io/gh/go-xorm/xorm/branch/master/graph/badge.svg)](https://codecov.io/gh/go-xorm/xorm)
+[![](https://goreportcard.com/badge/github.com/go-xorm/xorm)](https://goreportcard.com/report/github.com/go-xorm/xorm)
+[![Join the chat at https://img.shields.io/discord/323460943201959939.svg](https://img.shields.io/discord/323460943201959939.svg)](https://discord.gg/HuR2CF3)
+
+## Features
+
+* Struct <-> Table Mapping Support
+
+* Chainable APIs
+
+* Transaction Support
+
+* Both ORM and raw SQL operation Support
+
+* Sync database schema Support
+
+* Query Cache speed up
+
+* Database Reverse support, See [Xorm Tool README](https://github.com/go-xorm/cmd/blob/master/README.md)
+
+* Simple cascade loading support
+
+* Optimistic Locking support
+
+* SQL Builder support via [xorm.io/builder](https://xorm.io/builder)
+
+* Automatic Read/Write separation support
+
+* Postgres schema support
+
+* Context Cache support
+
+## Drivers Support
+
+Drivers for Go's sql package which currently support database/sql include:
+
+* Mysql: [github.com/go-sql-driver/mysql](https://github.com/go-sql-driver/mysql)
+
+* MyMysql: [github.com/ziutek/mymysql/godrv](https://github.com/ziutek/mymysql/tree/master/godrv)
+
+* Postgres: [github.com/lib/pq](https://github.com/lib/pq)
+
+* Tidb: [github.com/pingcap/tidb](https://github.com/pingcap/tidb)
+
+* SQLite: [github.com/mattn/go-sqlite3](https://github.com/mattn/go-sqlite3)
+
+* MsSql: [github.com/denisenkom/go-mssqldb](https://github.com/denisenkom/go-mssqldb)
+
+* Oracle: [github.com/mattn/go-oci8](https://github.com/mattn/go-oci8) (experiment)
+
+## Installation
+
+ go get github.com/go-xorm/xorm
+
+## Documents
+
+* [Manual](http://xorm.io/docs)
+
+* [GoDoc](http://godoc.org/github.com/go-xorm/xorm)
+
+## Quick Start
+
+* Create Engine
+
+```Go
+engine, err := xorm.NewEngine(driverName, dataSourceName)
+```
+
+* Define a struct and Sync2 table struct to database
+
+```Go
+type User struct {
+ Id int64
+ Name string
+ Salt string
+ Age int
+ Passwd string `xorm:"varchar(200)"`
+ Created time.Time `xorm:"created"`
+ Updated time.Time `xorm:"updated"`
+}
+
+err := engine.Sync2(new(User))
+```
+
+* Create Engine Group
+
+```Go
+dataSourceNameSlice := []string{masterDataSourceName, slave1DataSourceName, slave2DataSourceName}
+engineGroup, err := xorm.NewEngineGroup(driverName, dataSourceNameSlice)
+```
+
+```Go
+masterEngine, err := xorm.NewEngine(driverName, masterDataSourceName)
+slave1Engine, err := xorm.NewEngine(driverName, slave1DataSourceName)
+slave2Engine, err := xorm.NewEngine(driverName, slave2DataSourceName)
+engineGroup, err := xorm.NewEngineGroup(masterEngine, []*Engine{slave1Engine, slave2Engine})
+```
+
+Then, wherever you would use `engine`, you can simply use `engineGroup` instead.
+
+* `Query` runs a SQL string, the returned results is `[]map[string][]byte`, `QueryString` returns `[]map[string]string`, `QueryInterface` returns `[]map[string]interface{}`.
+
+```Go
+results, err := engine.Query("select * from user")
+results, err := engine.Where("a = 1").Query()
+
+results, err := engine.QueryString("select * from user")
+results, err := engine.Where("a = 1").QueryString()
+
+results, err := engine.QueryInterface("select * from user")
+results, err := engine.Where("a = 1").QueryInterface()
+```
+
+* `Exec` runs a SQL string, it returns `affected` and `error`
+
+```Go
+affected, err := engine.Exec("update user set age = ? where name = ?", age, name)
+```
+
+* `Insert` one or multiple records to database
+
+```Go
+affected, err := engine.Insert(&user)
+// INSERT INTO struct () values ()
+
+affected, err := engine.Insert(&user1, &user2)
+// INSERT INTO struct1 () values ()
+// INSERT INTO struct2 () values ()
+
+affected, err := engine.Insert(&users)
+// INSERT INTO struct () values (),(),()
+
+affected, err := engine.Insert(&user1, &users)
+// INSERT INTO struct1 () values ()
+// INSERT INTO struct2 () values (),(),()
+```
+
+* `Get` query one record from database
+
+```Go
+has, err := engine.Get(&user)
+// SELECT * FROM user LIMIT 1
+
+has, err := engine.Where("name = ?", name).Desc("id").Get(&user)
+// SELECT * FROM user WHERE name = ? ORDER BY id DESC LIMIT 1
+
+var name string
+has, err := engine.Table(&user).Where("id = ?", id).Cols("name").Get(&name)
+// SELECT name FROM user WHERE id = ?
+
+var id int64
+has, err := engine.Table(&user).Where("name = ?", name).Cols("id").Get(&id)
+has, err := engine.SQL("select id from user").Get(&id)
+// SELECT id FROM user WHERE name = ?
+
+var valuesMap = make(map[string]string)
+has, err := engine.Table(&user).Where("id = ?", id).Get(&valuesMap)
+// SELECT * FROM user WHERE id = ?
+
+var valuesSlice = make([]interface{}, len(cols))
+has, err := engine.Table(&user).Where("id = ?", id).Cols(cols...).Get(&valuesSlice)
+// SELECT col1, col2, col3 FROM user WHERE id = ?
+```
+
+* `Exist` check if one record exist on table
+
+```Go
+has, err := testEngine.Exist(new(RecordExist))
+// SELECT * FROM record_exist LIMIT 1
+
+has, err = testEngine.Exist(&RecordExist{
+ Name: "test1",
+ })
+// SELECT * FROM record_exist WHERE name = ? LIMIT 1
+
+has, err = testEngine.Where("name = ?", "test1").Exist(&RecordExist{})
+// SELECT * FROM record_exist WHERE name = ? LIMIT 1
+
+has, err = testEngine.SQL("select * from record_exist where name = ?", "test1").Exist()
+// select * from record_exist where name = ?
+
+has, err = testEngine.Table("record_exist").Exist()
+// SELECT * FROM record_exist LIMIT 1
+
+has, err = testEngine.Table("record_exist").Where("name = ?", "test1").Exist()
+// SELECT * FROM record_exist WHERE name = ? LIMIT 1
+```
+
+* `Find` queries multiple records from the database; you can also use join and extends
+
+```Go
+var users []User
+err := engine.Where("name = ?", name).And("age > 10").Limit(10, 0).Find(&users)
+// SELECT * FROM user WHERE name = ? AND age > 10 limit 10 offset 0
+
+type Detail struct {
+ Id int64
+ UserId int64 `xorm:"index"`
+}
+
+type UserDetail struct {
+ User `xorm:"extends"`
+ Detail `xorm:"extends"`
+}
+
+var users []UserDetail
+err := engine.Table("user").Select("user.*, detail.*").
+ Join("INNER", "detail", "detail.user_id = user.id").
+ Where("user.name = ?", name).Limit(10, 0).
+ Find(&users)
+// SELECT user.*, detail.* FROM user INNER JOIN detail WHERE user.name = ? limit 10 offset 0
+```
+
+* `Iterate` and `Rows` query multiple records and handle them record by record; there are two methods, Iterate and Rows
+
+```Go
+err := engine.Iterate(&User{Name:name}, func(idx int, bean interface{}) error {
+ user := bean.(*User)
+ return nil
+})
+// SELECT * FROM user
+
+err := engine.BufferSize(100).Iterate(&User{Name:name}, func(idx int, bean interface{}) error {
+ user := bean.(*User)
+ return nil
+})
+// SELECT * FROM user Limit 0, 100
+// SELECT * FROM user Limit 101, 100
+
+rows, err := engine.Rows(&User{Name:name})
+// SELECT * FROM user
+defer rows.Close()
+bean := new(Struct)
+for rows.Next() {
+ err = rows.Scan(bean)
+}
+```
+
+* `Update` update one or more records, default will update non-empty and non-zero fields except when you use Cols, AllCols and so on.
+
+```Go
+affected, err := engine.ID(1).Update(&user)
+// UPDATE user SET ... Where id = ?
+
+affected, err := engine.Update(&user, &User{Name:name})
+// UPDATE user SET ... Where name = ?
+
+var ids = []int64{1, 2, 3}
+affected, err := engine.In("id", ids).Update(&user)
+// UPDATE user SET ... Where id IN (?, ?, ?)
+
+// force update indicated columns by Cols
+affected, err := engine.ID(1).Cols("age").Update(&User{Name:name, Age: 12})
+// UPDATE user SET age = ?, updated=? Where id = ?
+
+// force NOT update indicated columns by Omit
+affected, err := engine.ID(1).Omit("name").Update(&User{Name:name, Age: 12})
+// UPDATE user SET age = ?, updated=? Where id = ?
+
+affected, err := engine.ID(1).AllCols().Update(&user)
+// UPDATE user SET name=?,age=?,salt=?,passwd=?,updated=? Where id = ?
+```
+
+* `Delete` delete one or more records, Delete MUST have condition
+
+```Go
+affected, err := engine.Where(...).Delete(&user)
+// DELETE FROM user Where ...
+
+affected, err := engine.ID(2).Delete(&user)
+// DELETE FROM user Where id = ?
+```
+
+* `Count` count records
+
+```Go
+counts, err := engine.Count(&user)
+// SELECT count(*) AS total FROM user
+```
+
+* `FindAndCount` combines function `Find` with `Count` which is usually used in query by page
+
+```Go
+var users []User
+counts, err := engine.FindAndCount(&users)
+```
+
+* `Sum` sum functions
+
+```Go
+agesFloat64, err := engine.Sum(&user, "age")
+// SELECT sum(age) AS total FROM user
+
+agesInt64, err := engine.SumInt(&user, "age")
+// SELECT sum(age) AS total FROM user
+
+sumFloat64Slice, err := engine.Sums(&user, "age", "score")
+// SELECT sum(age), sum(score) FROM user
+
+sumInt64Slice, err := engine.SumsInt(&user, "age", "score")
+// SELECT sum(age), sum(score) FROM user
+```
+
+* Query conditions builder
+
+```Go
+err := engine.Where(builder.NotIn("a", 1, 2).And(builder.In("b", "c", "d", "e"))).Find(&users)
+// SELECT id, name ... FROM user WHERE a NOT IN (?, ?) AND b IN (?, ?, ?)
+```
+
+* Multiple operations in one goroutine, with no transaction here but reusing session memory
+
+```Go
+session := engine.NewSession()
+defer session.Close()
+
+user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()}
+if _, err := session.Insert(&user1); err != nil {
+ return err
+}
+
+user2 := Userinfo{Username: "yyy"}
+if _, err := session.Where("id = ?", 2).Update(&user2); err != nil {
+ return err
+}
+
+if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil {
+ return err
+}
+
+return nil
+```
+
+* Transactions should run on one goroutine. This uses a transaction and reuses session memory
+
+```Go
+session := engine.NewSession()
+defer session.Close()
+
+// add Begin() before any action
+if err := session.Begin(); err != nil {
+ // if returned then will rollback automatically
+ return err
+}
+
+user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()}
+if _, err := session.Insert(&user1); err != nil {
+ return err
+}
+
+user2 := Userinfo{Username: "yyy"}
+if _, err := session.Where("id = ?", 2).Update(&user2); err != nil {
+ return err
+}
+
+if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil {
+ return err
+}
+
+// add Commit() after all actions
+return session.Commit()
+```
+
+* Or you can use `Transaction` to replace the above code.
+
+```Go
+res, err := engine.Transaction(func(session *xorm.Session) (interface{}, error) {
+ user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()}
+ if _, err := session.Insert(&user1); err != nil {
+ return nil, err
+ }
+
+ user2 := Userinfo{Username: "yyy"}
+ if _, err := session.Where("id = ?", 2).Update(&user2); err != nil {
+ return nil, err
+ }
+
+ if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil {
+ return nil, err
+ }
+ return nil, nil
+})
+```
+
+* Context Cache, if enabled, current query result will be cached on session and be used by next same statement on the same session.
+
+```Go
+ sess := engine.NewSession()
+ defer sess.Close()
+
+ var context = xorm.NewMemoryContextCache()
+
+ var c2 ContextGetStruct
+ has, err := sess.ID(1).ContextCache(context).Get(&c2)
+ assert.NoError(t, err)
+ assert.True(t, has)
+ assert.EqualValues(t, 1, c2.Id)
+ assert.EqualValues(t, "1", c2.Name)
+ sql, args := sess.LastSQL()
+ assert.True(t, len(sql) > 0)
+ assert.True(t, len(args) > 0)
+
+ var c3 ContextGetStruct
+ has, err = sess.ID(1).ContextCache(context).Get(&c3)
+ assert.NoError(t, err)
+ assert.True(t, has)
+ assert.EqualValues(t, 1, c3.Id)
+ assert.EqualValues(t, "1", c3.Name)
+ sql, args = sess.LastSQL()
+ assert.True(t, len(sql) == 0)
+ assert.True(t, len(args) == 0)
+```
+
+## Contributing
+
+If you want to pull request, please see [CONTRIBUTING](https://github.com/go-xorm/xorm/blob/master/CONTRIBUTING.md). And we also provide [Xorm on Google Groups](https://groups.google.com/forum/#!forum/xorm) to discuss.
+
+## Credits
+
+### Contributors
+
+This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
+
+
+### Backers
+
+Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/xorm#backer)]
+
+
+
+### Sponsors
+
+Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/xorm#sponsor)]
+
+## Changelog
+
+* **v0.7.0**
+ * Some bugs fixed
+
+* **v0.6.6**
+ * Some bugs fixed
+
+* **v0.6.5**
+ * Postgres schema support
+ * vgo support
+ * Add FindAndCount
+ * Database special params support via NewEngineWithParams
+ * Some bugs fixed
+
+* **v0.6.4**
+  * Automatic Read/Write separation
+ * Query/QueryString/QueryInterface and action with Where/And
+ * Get support non-struct variables
+ * BufferSize on Iterate
+ * fix some other bugs.
+
+[More changes ...](https://github.com/go-xorm/manual-en-US/tree/master/chapter-16)
+
+## Cases
+
+* [studygolang](http://studygolang.com/) - [github.com/studygolang/studygolang](https://github.com/studygolang/studygolang)
+
+* [Gitea](http://gitea.io) - [github.com/go-gitea/gitea](http://github.com/go-gitea/gitea)
+
+* [Gogs](http://try.gogits.org) - [github.com/gogits/gogs](http://github.com/gogits/gogs)
+
+* [grafana](https://grafana.com/) - [github.com/grafana/grafana](http://github.com/grafana/grafana)
+
+* [github.com/m3ng9i/qreader](https://github.com/m3ng9i/qreader)
+
+* [Wego](http://github.com/go-tango/wego)
+
+* [Docker.cn](https://docker.cn/)
+
+* [Xorm Adapter](https://github.com/casbin/xorm-adapter) for [Casbin](https://github.com/casbin/casbin) - [github.com/casbin/xorm-adapter](https://github.com/casbin/xorm-adapter)
+
+* [Gorevel](http://gorevel.cn/) - [github.com/goofcc/gorevel](http://github.com/goofcc/gorevel)
+
+* [Gowalker](http://gowalker.org) - [github.com/Unknwon/gowalker](http://github.com/Unknwon/gowalker)
+
+* [Gobuild.io](http://gobuild.io) - [github.com/shxsun/gobuild](http://github.com/shxsun/gobuild)
+
+* [Sudo China](http://sudochina.com) - [github.com/insionng/toropress](http://github.com/insionng/toropress)
+
+* [Godaily](http://godaily.org) - [github.com/govc/godaily](http://github.com/govc/godaily)
+
+* [YouGam](http://www.yougam.com/)
+
+* [GoCMS - github.com/zzboy/GoCMS](https://github.com/zzdboy/GoCMS)
+
+* [GoBBS - gobbs.domolo.com](http://gobbs.domolo.com/)
+
+* [go-blog](http://wangcheng.me) - [github.com/easykoo/go-blog](https://github.com/easykoo/go-blog)
+
+## LICENSE
+
+BSD License [http://creativecommons.org/licenses/BSD/](http://creativecommons.org/licenses/BSD/)
diff --git a/vendor/github.com/go-xorm/xorm/README_CN.md b/vendor/github.com/go-xorm/xorm/README_CN.md
new file mode 100644
index 0000000..0cec6ed
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/README_CN.md
@@ -0,0 +1,500 @@
+# xorm
+
+[English](https://github.com/go-xorm/xorm/blob/master/README.md)
+
+xorm是一个简单而强大的Go语言ORM库. 通过它可以使数据库操作非常简便。
+
+[![CircleCI](https://circleci.com/gh/go-xorm/xorm.svg?style=shield)](https://circleci.com/gh/go-xorm/xorm) [![codecov](https://codecov.io/gh/go-xorm/xorm/branch/master/graph/badge.svg)](https://codecov.io/gh/go-xorm/xorm)
+[![](https://goreportcard.com/badge/github.com/go-xorm/xorm)](https://goreportcard.com/report/github.com/go-xorm/xorm)
+[![Join the chat at https://img.shields.io/discord/323460943201959939.svg](https://img.shields.io/discord/323460943201959939.svg)](https://discord.gg/HuR2CF3)
+
+## 特性
+
+* 支持Struct和数据库表之间的灵活映射,并支持自动同步
+
+* 事务支持
+
+* 同时支持原始SQL语句和ORM操作的混合执行
+
+* 使用连写来简化调用
+
+* 支持使用Id, In, Where, Limit, Join, Having, Table, Sql, Cols等函数和结构体等方式作为条件
+
+* 支持级联加载Struct
+
+* Schema支持(仅Postgres)
+
+* 支持缓存
+
+* 支持根据数据库自动生成xorm的结构体
+
+* 支持记录版本(即乐观锁)
+
+* 内置SQL Builder支持
+
+* 上下文缓存支持
+
+## 驱动支持
+
+目前支持的Go数据库驱动和对应的数据库如下:
+
+* Mysql: [github.com/go-sql-driver/mysql](https://github.com/go-sql-driver/mysql)
+
+* MyMysql: [github.com/ziutek/mymysql/godrv](https://github.com/ziutek/mymysql/godrv)
+
+* Postgres: [github.com/lib/pq](https://github.com/lib/pq)
+
+* Tidb: [github.com/pingcap/tidb](https://github.com/pingcap/tidb)
+
+* SQLite: [github.com/mattn/go-sqlite3](https://github.com/mattn/go-sqlite3)
+
+* MsSql: [github.com/denisenkom/go-mssqldb](https://github.com/denisenkom/go-mssqldb)
+
+* MsSql: [github.com/lunny/godbc](https://github.com/lunny/godbc)
+
+* Oracle: [github.com/mattn/go-oci8](https://github.com/mattn/go-oci8) (试验性支持)
+
+## 安装
+
+ go get github.com/go-xorm/xorm
+
+## 文档
+
+* [操作指南](http://xorm.io/docs)
+
+* [GoWalker代码文档](http://gowalker.org/github.com/go-xorm/xorm)
+
+* [Godoc代码文档](http://godoc.org/github.com/go-xorm/xorm)
+
+# 快速开始
+
+* 第一步创建引擎,driverName, dataSourceName和database/sql接口相同
+
+```Go
+engine, err := xorm.NewEngine(driverName, dataSourceName)
+```
+
+* 定义一个和表同步的结构体,并且自动同步结构体到数据库
+
+```Go
+type User struct {
+ Id int64
+ Name string
+ Salt string
+ Age int
+ Passwd string `xorm:"varchar(200)"`
+ Created time.Time `xorm:"created"`
+ Updated time.Time `xorm:"updated"`
+}
+
+err := engine.Sync2(new(User))
+```
+
+* 创建Engine组
+
+```Go
+dataSourceNameSlice := []string{masterDataSourceName, slave1DataSourceName, slave2DataSourceName}
+engineGroup, err := xorm.NewEngineGroup(driverName, dataSourceNameSlice)
+```
+
+```Go
+masterEngine, err := xorm.NewEngine(driverName, masterDataSourceName)
+slave1Engine, err := xorm.NewEngine(driverName, slave1DataSourceName)
+slave2Engine, err := xorm.NewEngine(driverName, slave2DataSourceName)
+engineGroup, err := xorm.NewEngineGroup(masterEngine, []*Engine{slave1Engine, slave2Engine})
+```
+
+所有使用 `engine` 都可以简单的用 `engineGroup` 来替换。
+
+* `Query` 最原始的也支持SQL语句查询,返回的结果类型为 []map[string][]byte。`QueryString` 返回 []map[string]string, `QueryInterface` 返回 `[]map[string]interface{}`.
+
+```Go
+results, err := engine.Query("select * from user")
+results, err := engine.Where("a = 1").Query()
+
+results, err := engine.QueryString("select * from user")
+results, err := engine.Where("a = 1").QueryString()
+
+results, err := engine.QueryInterface("select * from user")
+results, err := engine.Where("a = 1").QueryInterface()
+```
+
+* `Exec` 执行一个SQL语句
+
+```Go
+affected, err := engine.Exec("update user set age = ? where name = ?", age, name)
+```
+
+* `Insert` 插入一条或者多条记录
+
+```Go
+affected, err := engine.Insert(&user)
+// INSERT INTO struct () values ()
+
+affected, err := engine.Insert(&user1, &user2)
+// INSERT INTO struct1 () values ()
+// INSERT INTO struct2 () values ()
+
+affected, err := engine.Insert(&users)
+// INSERT INTO struct () values (),(),()
+
+affected, err := engine.Insert(&user1, &users)
+// INSERT INTO struct1 () values ()
+// INSERT INTO struct2 () values (),(),()
+```
+
+* `Get` 查询单条记录
+
+```Go
+has, err := engine.Get(&user)
+// SELECT * FROM user LIMIT 1
+
+has, err := engine.Where("name = ?", name).Desc("id").Get(&user)
+// SELECT * FROM user WHERE name = ? ORDER BY id DESC LIMIT 1
+
+var name string
+has, err := engine.Table(&user).Where("id = ?", id).Cols("name").Get(&name)
+// SELECT name FROM user WHERE id = ?
+
+var id int64
+has, err := engine.Table(&user).Where("name = ?", name).Cols("id").Get(&id)
+has, err := engine.SQL("select id from user").Get(&id)
+// SELECT id FROM user WHERE name = ?
+
+var valuesMap = make(map[string]string)
+has, err := engine.Table(&user).Where("id = ?", id).Get(&valuesMap)
+// SELECT * FROM user WHERE id = ?
+
+var valuesSlice = make([]interface{}, len(cols))
+has, err := engine.Table(&user).Where("id = ?", id).Cols(cols...).Get(&valuesSlice)
+// SELECT col1, col2, col3 FROM user WHERE id = ?
+```
+
+* `Exist` 检测记录是否存在
+
+```Go
+has, err := testEngine.Exist(new(RecordExist))
+// SELECT * FROM record_exist LIMIT 1
+
+has, err = testEngine.Exist(&RecordExist{
+ Name: "test1",
+ })
+// SELECT * FROM record_exist WHERE name = ? LIMIT 1
+
+has, err = testEngine.Where("name = ?", "test1").Exist(&RecordExist{})
+// SELECT * FROM record_exist WHERE name = ? LIMIT 1
+
+has, err = testEngine.SQL("select * from record_exist where name = ?", "test1").Exist()
+// select * from record_exist where name = ?
+
+has, err = testEngine.Table("record_exist").Exist()
+// SELECT * FROM record_exist LIMIT 1
+
+has, err = testEngine.Table("record_exist").Where("name = ?", "test1").Exist()
+// SELECT * FROM record_exist WHERE name = ? LIMIT 1
+```
+
+* `Find` 查询多条记录,当然可以使用Join和extends来组合使用
+
+```Go
+var users []User
+err := engine.Where("name = ?", name).And("age > 10").Limit(10, 0).Find(&users)
+// SELECT * FROM user WHERE name = ? AND age > 10 limit 10 offset 0
+
+type Detail struct {
+ Id int64
+ UserId int64 `xorm:"index"`
+}
+
+type UserDetail struct {
+ User `xorm:"extends"`
+ Detail `xorm:"extends"`
+}
+
+var users []UserDetail
+err := engine.Table("user").Select("user.*, detail.*").
+ Join("INNER", "detail", "detail.user_id = user.id").
+ Where("user.name = ?", name).Limit(10, 0).
+ Find(&users)
+// SELECT user.*, detail.* FROM user INNER JOIN detail WHERE user.name = ? limit 10 offset 0
+```
+
+* `Iterate` 和 `Rows` 根据条件遍历数据库,可以有两种方式: Iterate and Rows
+
+```Go
+err := engine.Iterate(&User{Name:name}, func(idx int, bean interface{}) error {
+ user := bean.(*User)
+ return nil
+})
+// SELECT * FROM user
+
+err := engine.BufferSize(100).Iterate(&User{Name:name}, func(idx int, bean interface{}) error {
+ user := bean.(*User)
+ return nil
+})
+// SELECT * FROM user Limit 0, 100
+// SELECT * FROM user Limit 101, 100
+
+rows, err := engine.Rows(&User{Name:name})
+// SELECT * FROM user
+defer rows.Close()
+bean := new(Struct)
+for rows.Next() {
+ err = rows.Scan(bean)
+}
+```
+
+* `Update` 更新数据,除非使用Cols,AllCols函数指明,默认只更新非空和非0的字段
+
+```Go
+affected, err := engine.ID(1).Update(&user)
+// UPDATE user SET ... Where id = ?
+
+affected, err := engine.Update(&user, &User{Name:name})
+// UPDATE user SET ... Where name = ?
+
+var ids = []int64{1, 2, 3}
+affected, err := engine.In(ids).Update(&user)
+// UPDATE user SET ... Where id IN (?, ?, ?)
+
+// force update indicated columns by Cols
+affected, err := engine.ID(1).Cols("age").Update(&User{Name:name, Age: 12})
+// UPDATE user SET age = ?, updated=? Where id = ?
+
+// force NOT update indicated columns by Omit
+affected, err := engine.ID(1).Omit("name").Update(&User{Name:name, Age: 12})
+// UPDATE user SET age = ?, updated=? Where id = ?
+
+affected, err := engine.ID(1).AllCols().Update(&user)
+// UPDATE user SET name=?,age=?,salt=?,passwd=?,updated=? Where id = ?
+```
+
+* `Delete` 删除记录,需要注意,删除必须至少有一个条件,否则会报错。要清空数据库可以用EmptyTable
+
+```Go
+affected, err := engine.Where(...).Delete(&user)
+// DELETE FROM user Where ...
+
+affected, err := engine.ID(2).Delete(&user)
+// DELETE FROM user Where id = ?
+```
+
+* `Count` 获取记录条数
+
+```Go
+counts, err := engine.Count(&user)
+// SELECT count(*) AS total FROM user
+```
+
+* `Sum` 求和函数
+
+```Go
+agesFloat64, err := engine.Sum(&user, "age")
+// SELECT sum(age) AS total FROM user
+
+agesInt64, err := engine.SumInt(&user, "age")
+// SELECT sum(age) AS total FROM user
+
+sumFloat64Slice, err := engine.Sums(&user, "age", "score")
+// SELECT sum(age), sum(score) FROM user
+
+sumInt64Slice, err := engine.SumsInt(&user, "age", "score")
+// SELECT sum(age), sum(score) FROM user
+```
+
+* 条件编辑器
+
+```Go
+err := engine.Where(builder.NotIn("a", 1, 2).And(builder.In("b", "c", "d", "e"))).Find(&users)
+// SELECT id, name ... FROM user WHERE a NOT IN (?, ?) AND b IN (?, ?, ?)
+```
+
+* 在一个Go程中多次操作数据库,但没有事务
+
+```Go
+session := engine.NewSession()
+defer session.Close()
+
+user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()}
+if _, err := session.Insert(&user1); err != nil {
+ return err
+}
+
+user2 := Userinfo{Username: "yyy"}
+if _, err := session.Where("id = ?", 2).Update(&user2); err != nil {
+ return err
+}
+
+if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil {
+ return err
+}
+
+return nil
+```
+
+* 在一个Go程中有事务
+
+```Go
+session := engine.NewSession()
+defer session.Close()
+
+// add Begin() before any action
+if err := session.Begin(); err != nil {
+ // if returned then will rollback automatically
+ return err
+}
+
+user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()}
+if _, err := session.Insert(&user1); err != nil {
+ return err
+}
+
+user2 := Userinfo{Username: "yyy"}
+if _, err := session.Where("id = ?", 2).Update(&user2); err != nil {
+ return err
+}
+
+if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil {
+ return err
+}
+
+// add Commit() after all actions
+return session.Commit()
+```
+
+* 事务的简写方法
+
+```Go
+res, err := engine.Transaction(func(session *xorm.Session) (interface{}, error) {
+ user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()}
+ if _, err := session.Insert(&user1); err != nil {
+ return nil, err
+ }
+
+ user2 := Userinfo{Username: "yyy"}
+ if _, err := session.Where("id = ?", 2).Update(&user2); err != nil {
+ return nil, err
+ }
+
+ if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil {
+ return nil, err
+ }
+ return nil, nil
+})
+```
+
+* 上下文缓存,如果启用,那么针对单个对象的查询将会被缓存到系统中,可以被下一个查询使用。
+
+```Go
+ sess := engine.NewSession()
+ defer sess.Close()
+
+ var context = xorm.NewMemoryContextCache()
+
+ var c2 ContextGetStruct
+ has, err := sess.ID(1).ContextCache(context).Get(&c2)
+ assert.NoError(t, err)
+ assert.True(t, has)
+ assert.EqualValues(t, 1, c2.Id)
+ assert.EqualValues(t, "1", c2.Name)
+ sql, args := sess.LastSQL()
+ assert.True(t, len(sql) > 0)
+ assert.True(t, len(args) > 0)
+
+ var c3 ContextGetStruct
+ has, err = sess.ID(1).ContextCache(context).Get(&c3)
+ assert.NoError(t, err)
+ assert.True(t, has)
+ assert.EqualValues(t, 1, c3.Id)
+ assert.EqualValues(t, "1", c3.Name)
+ sql, args = sess.LastSQL()
+ assert.True(t, len(sql) == 0)
+ assert.True(t, len(args) == 0)
+```
+
+## 贡献
+
+如果您也想为Xorm贡献您的力量,请查看 [CONTRIBUTING](https://github.com/go-xorm/xorm/blob/master/CONTRIBUTING.md)。您也可以加入QQ群 技术帮助和讨论。
+群一:280360085 (已满)
+群二:795010183
+
+## Credits
+
+### Contributors
+
+感谢所有的贡献者. [[Contribute](CONTRIBUTING.md)].
+
+
+### Backers
+
+感谢我们所有的 backers! 🙏 [[成为 backer](https://opencollective.com/xorm#backer)]
+
+
+
+### Sponsors
+
+成为 sponsor 来支持 xorm。您的 logo 将会被显示并被链接到您的网站。 [[成为 sponsor](https://opencollective.com/xorm#sponsor)]
+
+# 案例
+
+* [Go语言中文网](http://studygolang.com/) - [github.com/studygolang/studygolang](https://github.com/studygolang/studygolang)
+
+* [Gitea](http://gitea.io) - [github.com/go-gitea/gitea](http://github.com/go-gitea/gitea)
+
+* [Gogs](http://try.gogits.org) - [github.com/gogits/gogs](http://github.com/gogits/gogs)
+
+* [grafana](https://grafana.com/) - [github.com/grafana/grafana](http://github.com/grafana/grafana)
+
+* [github.com/m3ng9i/qreader](https://github.com/m3ng9i/qreader)
+
+* [Wego](http://github.com/go-tango/wego)
+
+* [Docker.cn](https://docker.cn/)
+
+* [Xorm Adapter](https://github.com/casbin/xorm-adapter) for [Casbin](https://github.com/casbin/casbin) - [github.com/casbin/xorm-adapter](https://github.com/casbin/xorm-adapter)
+
+* [Gowalker](http://gowalker.org) - [github.com/Unknwon/gowalker](http://github.com/Unknwon/gowalker)
+
+* [Gobuild.io](http://gobuild.io) - [github.com/shxsun/gobuild](http://github.com/shxsun/gobuild)
+
+* [Sudo China](http://sudochina.com) - [github.com/insionng/toropress](http://github.com/insionng/toropress)
+
+* [Godaily](http://godaily.org) - [github.com/govc/godaily](http://github.com/govc/godaily)
+
+* [YouGam](http://www.yougam.com/)
+
+* [GoCMS - github.com/zzboy/GoCMS](https://github.com/zzdboy/GoCMS)
+
+* [GoBBS - gobbs.domolo.com](http://gobbs.domolo.com/)
+
+* [go-blog](http://wangcheng.me) - [github.com/easykoo/go-blog](https://github.com/easykoo/go-blog)
+
+
+## 更新日志
+
+* **v0.7.0**
+ * 修正部分Bug
+
+* **v0.6.6**
+ * 修正部分Bug
+
+* **v0.6.5**
+ * 通过 engine.SetSchema 来支持 schema,当前仅支持Postgres
+ * vgo 支持
+ * 新增 `FindAndCount` 函数
+ * 通过 `NewEngineWithParams` 支持数据库特别参数
+ * 修正部分Bug
+
+* **v0.6.4**
+ * 自动读写分离支持
+ * Query/QueryString/QueryInterface 支持与 Where/And 合用
+ * `Get` 支持获取非结构体变量
+ * `Iterate` 支持 `BufferSize`
+ * 修正部分Bug
+
+[更多更新日志...](https://github.com/go-xorm/manual-zh-CN/tree/master/chapter-16)
+
+## LICENSE
+
+BSD License
+[http://creativecommons.org/licenses/BSD/](http://creativecommons.org/licenses/BSD/)
diff --git a/vendor/github.com/go-xorm/xorm/cache_lru.go b/vendor/github.com/go-xorm/xorm/cache_lru.go
new file mode 100644
index 0000000..ab948bd
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/cache_lru.go
@@ -0,0 +1,284 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "container/list"
+ "fmt"
+ "sync"
+ "time"
+
+ "xorm.io/core"
+)
+
+// LRUCacher implments cache object facilities
+type LRUCacher struct {
+ idList *list.List
+ sqlList *list.List
+ idIndex map[string]map[string]*list.Element
+ sqlIndex map[string]map[string]*list.Element
+ store core.CacheStore
+ mutex sync.Mutex
+ MaxElementSize int
+ Expired time.Duration
+ GcInterval time.Duration
+}
+
+// NewLRUCacher creates a cacher
+func NewLRUCacher(store core.CacheStore, maxElementSize int) *LRUCacher {
+ return NewLRUCacher2(store, 3600*time.Second, maxElementSize)
+}
+
+// NewLRUCacher2 creates a cache include different params
+func NewLRUCacher2(store core.CacheStore, expired time.Duration, maxElementSize int) *LRUCacher {
+ cacher := &LRUCacher{store: store, idList: list.New(),
+ sqlList: list.New(), Expired: expired,
+ GcInterval: core.CacheGcInterval, MaxElementSize: maxElementSize,
+ sqlIndex: make(map[string]map[string]*list.Element),
+ idIndex: make(map[string]map[string]*list.Element),
+ }
+ cacher.RunGC()
+ return cacher
+}
+
+// RunGC run once every m.GcInterval
+func (m *LRUCacher) RunGC() {
+ time.AfterFunc(m.GcInterval, func() {
+ m.RunGC()
+ m.GC()
+ })
+}
+
+// GC check ids lit and sql list to remove all element expired
+func (m *LRUCacher) GC() {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+ var removedNum int
+ for e := m.idList.Front(); e != nil; {
+ if removedNum <= core.CacheGcMaxRemoved &&
+ time.Now().Sub(e.Value.(*idNode).lastVisit) > m.Expired {
+ removedNum++
+ next := e.Next()
+ node := e.Value.(*idNode)
+ m.delBean(node.tbName, node.id)
+ e = next
+ } else {
+ break
+ }
+ }
+
+ removedNum = 0
+ for e := m.sqlList.Front(); e != nil; {
+ if removedNum <= core.CacheGcMaxRemoved &&
+ time.Now().Sub(e.Value.(*sqlNode).lastVisit) > m.Expired {
+ removedNum++
+ next := e.Next()
+ node := e.Value.(*sqlNode)
+ m.delIds(node.tbName, node.sql)
+ e = next
+ } else {
+ break
+ }
+ }
+}
+
+// GetIds returns all bean's ids according to sql and parameter from cache
+func (m *LRUCacher) GetIds(tableName, sql string) interface{} {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+ if _, ok := m.sqlIndex[tableName]; !ok {
+ m.sqlIndex[tableName] = make(map[string]*list.Element)
+ }
+ if v, err := m.store.Get(sql); err == nil {
+ if el, ok := m.sqlIndex[tableName][sql]; !ok {
+ el = m.sqlList.PushBack(newSQLNode(tableName, sql))
+ m.sqlIndex[tableName][sql] = el
+ } else {
+ lastTime := el.Value.(*sqlNode).lastVisit
+ // if expired, remove the node and return nil
+ if time.Now().Sub(lastTime) > m.Expired {
+ m.delIds(tableName, sql)
+ return nil
+ }
+ m.sqlList.MoveToBack(el)
+ el.Value.(*sqlNode).lastVisit = time.Now()
+ }
+ return v
+ }
+
+ m.delIds(tableName, sql)
+ return nil
+}
+
+// GetBean returns bean according tableName and id from cache
+func (m *LRUCacher) GetBean(tableName string, id string) interface{} {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+ if _, ok := m.idIndex[tableName]; !ok {
+ m.idIndex[tableName] = make(map[string]*list.Element)
+ }
+ tid := genID(tableName, id)
+ if v, err := m.store.Get(tid); err == nil {
+ if el, ok := m.idIndex[tableName][id]; ok {
+ lastTime := el.Value.(*idNode).lastVisit
+ // if expired, remove the node and return nil
+ if time.Now().Sub(lastTime) > m.Expired {
+ m.delBean(tableName, id)
+ return nil
+ }
+ m.idList.MoveToBack(el)
+ el.Value.(*idNode).lastVisit = time.Now()
+ } else {
+ el = m.idList.PushBack(newIDNode(tableName, id))
+ m.idIndex[tableName][id] = el
+ }
+ return v
+ }
+
+ // store bean is not exist, then remove memory's index
+ m.delBean(tableName, id)
+ return nil
+}
+
+// clearIds clears all sql-ids mapping on table tableName from cache
+func (m *LRUCacher) clearIds(tableName string) {
+ if tis, ok := m.sqlIndex[tableName]; ok {
+ for sql, v := range tis {
+ m.sqlList.Remove(v)
+ m.store.Del(sql)
+ }
+ }
+ m.sqlIndex[tableName] = make(map[string]*list.Element)
+}
+
+// ClearIds clears all sql-ids mapping on table tableName from cache
+func (m *LRUCacher) ClearIds(tableName string) {
+ m.mutex.Lock()
+ m.clearIds(tableName)
+ m.mutex.Unlock()
+}
+
+func (m *LRUCacher) clearBeans(tableName string) {
+ if tis, ok := m.idIndex[tableName]; ok {
+ for id, v := range tis {
+ m.idList.Remove(v)
+ tid := genID(tableName, id)
+ m.store.Del(tid)
+ }
+ }
+ m.idIndex[tableName] = make(map[string]*list.Element)
+}
+
+// ClearBeans clears all beans in some table
+func (m *LRUCacher) ClearBeans(tableName string) {
+ m.mutex.Lock()
+ m.clearBeans(tableName)
+ m.mutex.Unlock()
+}
+
+// PutIds pus ids into table
+func (m *LRUCacher) PutIds(tableName, sql string, ids interface{}) {
+ m.mutex.Lock()
+ if _, ok := m.sqlIndex[tableName]; !ok {
+ m.sqlIndex[tableName] = make(map[string]*list.Element)
+ }
+ if el, ok := m.sqlIndex[tableName][sql]; !ok {
+ el = m.sqlList.PushBack(newSQLNode(tableName, sql))
+ m.sqlIndex[tableName][sql] = el
+ } else {
+ el.Value.(*sqlNode).lastVisit = time.Now()
+ }
+ m.store.Put(sql, ids)
+ if m.sqlList.Len() > m.MaxElementSize {
+ e := m.sqlList.Front()
+ node := e.Value.(*sqlNode)
+ m.delIds(node.tbName, node.sql)
+ }
+ m.mutex.Unlock()
+}
+
+// PutBean puts beans into table
+func (m *LRUCacher) PutBean(tableName string, id string, obj interface{}) {
+ m.mutex.Lock()
+ var el *list.Element
+ var ok bool
+
+ if el, ok = m.idIndex[tableName][id]; !ok {
+ el = m.idList.PushBack(newIDNode(tableName, id))
+ m.idIndex[tableName][id] = el
+ } else {
+ el.Value.(*idNode).lastVisit = time.Now()
+ }
+
+ m.store.Put(genID(tableName, id), obj)
+ if m.idList.Len() > m.MaxElementSize {
+ e := m.idList.Front()
+ node := e.Value.(*idNode)
+ m.delBean(node.tbName, node.id)
+ }
+ m.mutex.Unlock()
+}
+
+func (m *LRUCacher) delIds(tableName, sql string) {
+ if _, ok := m.sqlIndex[tableName]; ok {
+ if el, ok := m.sqlIndex[tableName][sql]; ok {
+ delete(m.sqlIndex[tableName], sql)
+ m.sqlList.Remove(el)
+ }
+ }
+ m.store.Del(sql)
+}
+
+// DelIds deletes ids
+func (m *LRUCacher) DelIds(tableName, sql string) {
+ m.mutex.Lock()
+ m.delIds(tableName, sql)
+ m.mutex.Unlock()
+}
+
+func (m *LRUCacher) delBean(tableName string, id string) {
+ tid := genID(tableName, id)
+ if el, ok := m.idIndex[tableName][id]; ok {
+ delete(m.idIndex[tableName], id)
+ m.idList.Remove(el)
+ m.clearIds(tableName)
+ }
+ m.store.Del(tid)
+}
+
+// DelBean deletes beans in some table
+func (m *LRUCacher) DelBean(tableName string, id string) {
+ m.mutex.Lock()
+ m.delBean(tableName, id)
+ m.mutex.Unlock()
+}
+
+type idNode struct {
+ tbName string
+ id string
+ lastVisit time.Time
+}
+
+type sqlNode struct {
+ tbName string
+ sql string
+ lastVisit time.Time
+}
+
+func genSQLKey(sql string, args interface{}) string {
+ return fmt.Sprintf("%v-%v", sql, args)
+}
+
+func genID(prefix string, id string) string {
+ return fmt.Sprintf("%v-%v", prefix, id)
+}
+
+func newIDNode(tbName string, id string) *idNode {
+ return &idNode{tbName, id, time.Now()}
+}
+
+func newSQLNode(tbName, sql string) *sqlNode {
+ return &sqlNode{tbName, sql, time.Now()}
+}
diff --git a/vendor/github.com/go-xorm/xorm/cache_memory_store.go b/vendor/github.com/go-xorm/xorm/cache_memory_store.go
new file mode 100644
index 0000000..0c483f4
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/cache_memory_store.go
@@ -0,0 +1,51 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "sync"
+
+ "xorm.io/core"
+)
+
+var _ core.CacheStore = NewMemoryStore()
+
+// MemoryStore represents in-memory store
+type MemoryStore struct {
+ store map[interface{}]interface{}
+ mutex sync.RWMutex
+}
+
+// NewMemoryStore creates a new store in memory
+func NewMemoryStore() *MemoryStore {
+ return &MemoryStore{store: make(map[interface{}]interface{})}
+}
+
+// Put puts object into store
+func (s *MemoryStore) Put(key string, value interface{}) error {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.store[key] = value
+ return nil
+}
+
+// Get gets object from store
+func (s *MemoryStore) Get(key string) (interface{}, error) {
+ s.mutex.RLock()
+ defer s.mutex.RUnlock()
+ if v, ok := s.store[key]; ok {
+ return v, nil
+ }
+
+ return nil, ErrNotExist
+}
+
+// Del deletes object
+func (s *MemoryStore) Del(key string) error {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ delete(s.store, key)
+ return nil
+}
diff --git a/vendor/github.com/go-xorm/xorm/context_cache.go b/vendor/github.com/go-xorm/xorm/context_cache.go
new file mode 100644
index 0000000..1bc2288
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/context_cache.go
@@ -0,0 +1,30 @@
+// Copyright 2018 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+// ContextCache is the interface that operates the cache data.
+type ContextCache interface {
+ // Put puts value into cache with key.
+ Put(key string, val interface{})
+ // Get gets cached value by given key.
+ Get(key string) interface{}
+}
+
+type memoryContextCache map[string]interface{}
+
+// NewMemoryContextCache return memoryContextCache
+func NewMemoryContextCache() memoryContextCache {
+ return make(map[string]interface{})
+}
+
+// Put puts value into cache with key.
+func (m memoryContextCache) Put(key string, val interface{}) {
+ m[key] = val
+}
+
+// Get gets cached value by given key.
+func (m memoryContextCache) Get(key string) interface{} {
+ return m[key]
+}
diff --git a/vendor/github.com/go-xorm/xorm/convert.go b/vendor/github.com/go-xorm/xorm/convert.go
new file mode 100644
index 0000000..2316ca0
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/convert.go
@@ -0,0 +1,348 @@
+// Copyright 2017 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+var errNilPtr = errors.New("destination pointer is nil") // embedded in descriptive error
+
+func strconvErr(err error) error {
+ if ne, ok := err.(*strconv.NumError); ok {
+ return ne.Err
+ }
+ return err
+}
+
+func cloneBytes(b []byte) []byte {
+ if b == nil {
+ return nil
+ } else {
+ c := make([]byte, len(b))
+ copy(c, b)
+ return c
+ }
+}
+
+func asString(src interface{}) string {
+ switch v := src.(type) {
+ case string:
+ return v
+ case []byte:
+ return string(v)
+ }
+ rv := reflect.ValueOf(src)
+ switch rv.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.FormatInt(rv.Int(), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return strconv.FormatUint(rv.Uint(), 10)
+ case reflect.Float64:
+ return strconv.FormatFloat(rv.Float(), 'g', -1, 64)
+ case reflect.Float32:
+ return strconv.FormatFloat(rv.Float(), 'g', -1, 32)
+ case reflect.Bool:
+ return strconv.FormatBool(rv.Bool())
+ }
+ return fmt.Sprintf("%v", src)
+}
+
+func asBytes(buf []byte, rv reflect.Value) (b []byte, ok bool) {
+ switch rv.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.AppendInt(buf, rv.Int(), 10), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return strconv.AppendUint(buf, rv.Uint(), 10), true
+ case reflect.Float32:
+ return strconv.AppendFloat(buf, rv.Float(), 'g', -1, 32), true
+ case reflect.Float64:
+ return strconv.AppendFloat(buf, rv.Float(), 'g', -1, 64), true
+ case reflect.Bool:
+ return strconv.AppendBool(buf, rv.Bool()), true
+ case reflect.String:
+ s := rv.String()
+ return append(buf, s...), true
+ }
+ return
+}
+
+// convertAssign copies to dest the value in src, converting it if possible.
+// An error is returned if the copy would result in loss of information.
+// dest should be a pointer type.
+func convertAssign(dest, src interface{}) error {
+ // Common cases, without reflect.
+ switch s := src.(type) {
+ case string:
+ switch d := dest.(type) {
+ case *string:
+ if d == nil {
+ return errNilPtr
+ }
+ *d = s
+ return nil
+ case *[]byte:
+ if d == nil {
+ return errNilPtr
+ }
+ *d = []byte(s)
+ return nil
+ }
+ case []byte:
+ switch d := dest.(type) {
+ case *string:
+ if d == nil {
+ return errNilPtr
+ }
+ *d = string(s)
+ return nil
+ case *interface{}:
+ if d == nil {
+ return errNilPtr
+ }
+ *d = cloneBytes(s)
+ return nil
+ case *[]byte:
+ if d == nil {
+ return errNilPtr
+ }
+ *d = cloneBytes(s)
+ return nil
+ }
+
+ case time.Time:
+ switch d := dest.(type) {
+ case *string:
+ *d = s.Format(time.RFC3339Nano)
+ return nil
+ case *[]byte:
+ if d == nil {
+ return errNilPtr
+ }
+ *d = []byte(s.Format(time.RFC3339Nano))
+ return nil
+ }
+ case nil:
+ switch d := dest.(type) {
+ case *interface{}:
+ if d == nil {
+ return errNilPtr
+ }
+ *d = nil
+ return nil
+ case *[]byte:
+ if d == nil {
+ return errNilPtr
+ }
+ *d = nil
+ return nil
+ }
+ }
+
+ var sv reflect.Value
+
+ switch d := dest.(type) {
+ case *string:
+ sv = reflect.ValueOf(src)
+ switch sv.Kind() {
+ case reflect.Bool,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Float32, reflect.Float64:
+ *d = asString(src)
+ return nil
+ }
+ case *[]byte:
+ sv = reflect.ValueOf(src)
+ if b, ok := asBytes(nil, sv); ok {
+ *d = b
+ return nil
+ }
+ case *bool:
+ bv, err := driver.Bool.ConvertValue(src)
+ if err == nil {
+ *d = bv.(bool)
+ }
+ return err
+ case *interface{}:
+ *d = src
+ return nil
+ }
+
+ dpv := reflect.ValueOf(dest)
+ if dpv.Kind() != reflect.Ptr {
+ return errors.New("destination not a pointer")
+ }
+ if dpv.IsNil() {
+ return errNilPtr
+ }
+
+ if !sv.IsValid() {
+ sv = reflect.ValueOf(src)
+ }
+
+ dv := reflect.Indirect(dpv)
+ if sv.IsValid() && sv.Type().AssignableTo(dv.Type()) {
+ switch b := src.(type) {
+ case []byte:
+ dv.Set(reflect.ValueOf(cloneBytes(b)))
+ default:
+ dv.Set(sv)
+ }
+ return nil
+ }
+
+ if dv.Kind() == sv.Kind() && sv.Type().ConvertibleTo(dv.Type()) {
+ dv.Set(sv.Convert(dv.Type()))
+ return nil
+ }
+
+ switch dv.Kind() {
+ case reflect.Ptr:
+ if src == nil {
+ dv.Set(reflect.Zero(dv.Type()))
+ return nil
+ }
+
+ dv.Set(reflect.New(dv.Type().Elem()))
+ return convertAssign(dv.Interface(), src)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ s := asString(src)
+ i64, err := strconv.ParseInt(s, 10, dv.Type().Bits())
+ if err != nil {
+ err = strconvErr(err)
+ return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err)
+ }
+ dv.SetInt(i64)
+ return nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ s := asString(src)
+ u64, err := strconv.ParseUint(s, 10, dv.Type().Bits())
+ if err != nil {
+ err = strconvErr(err)
+ return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err)
+ }
+ dv.SetUint(u64)
+ return nil
+ case reflect.Float32, reflect.Float64:
+ s := asString(src)
+ f64, err := strconv.ParseFloat(s, dv.Type().Bits())
+ if err != nil {
+ err = strconvErr(err)
+ return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err)
+ }
+ dv.SetFloat(f64)
+ return nil
+ case reflect.String:
+ dv.SetString(asString(src))
+ return nil
+ }
+
+ return fmt.Errorf("unsupported Scan, storing driver.Value type %T into type %T", src, dest)
+}
+
+func asKind(vv reflect.Value, tp reflect.Type) (interface{}, error) {
+ switch tp.Kind() {
+ case reflect.Int64:
+ return vv.Int(), nil
+ case reflect.Int:
+ return int(vv.Int()), nil
+ case reflect.Int32:
+ return int32(vv.Int()), nil
+ case reflect.Int16:
+ return int16(vv.Int()), nil
+ case reflect.Int8:
+ return int8(vv.Int()), nil
+ case reflect.Uint64:
+ return vv.Uint(), nil
+ case reflect.Uint:
+ return uint(vv.Uint()), nil
+ case reflect.Uint32:
+ return uint32(vv.Uint()), nil
+ case reflect.Uint16:
+ return uint16(vv.Uint()), nil
+ case reflect.Uint8:
+ return uint8(vv.Uint()), nil
+ case reflect.String:
+ return vv.String(), nil
+ case reflect.Slice:
+ if tp.Elem().Kind() == reflect.Uint8 {
+ v, err := strconv.ParseInt(string(vv.Interface().([]byte)), 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ return v, nil
+ }
+
+ }
+ return nil, fmt.Errorf("unsupported primary key type: %v, %v", tp, vv)
+}
+
+func convertFloat(v interface{}) (float64, error) {
+ switch v.(type) {
+ case float32:
+ return float64(v.(float32)), nil
+ case float64:
+ return v.(float64), nil
+ case string:
+ i, err := strconv.ParseFloat(v.(string), 64)
+ if err != nil {
+ return 0, err
+ }
+ return i, nil
+ case []byte:
+ i, err := strconv.ParseFloat(string(v.([]byte)), 64)
+ if err != nil {
+ return 0, err
+ }
+ return i, nil
+ }
+ return 0, fmt.Errorf("unsupported type: %v", v)
+}
+
+func convertInt(v interface{}) (int64, error) {
+ switch v.(type) {
+ case int:
+ return int64(v.(int)), nil
+ case int8:
+ return int64(v.(int8)), nil
+ case int16:
+ return int64(v.(int16)), nil
+ case int32:
+ return int64(v.(int32)), nil
+ case int64:
+ return v.(int64), nil
+ case []byte:
+ i, err := strconv.ParseInt(string(v.([]byte)), 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ return i, nil
+ case string:
+ i, err := strconv.ParseInt(v.(string), 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ return i, nil
+ }
+ return 0, fmt.Errorf("unsupported type: %v", v)
+}
+
+func asBool(bs []byte) (bool, error) {
+ if len(bs) == 0 {
+ return false, nil
+ }
+ if bs[0] == 0x00 {
+ return false, nil
+ } else if bs[0] == 0x01 {
+ return true, nil
+ }
+ return strconv.ParseBool(string(bs))
+}
diff --git a/vendor/github.com/go-xorm/xorm/dialect_mssql.go b/vendor/github.com/go-xorm/xorm/dialect_mssql.go
new file mode 100644
index 0000000..61061cb
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/dialect_mssql.go
@@ -0,0 +1,568 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "errors"
+ "fmt"
+ "net/url"
+ "strconv"
+ "strings"
+
+ "xorm.io/core"
+)
+
+var (
+ mssqlReservedWords = map[string]bool{
+ "ADD": true,
+ "EXTERNAL": true,
+ "PROCEDURE": true,
+ "ALL": true,
+ "FETCH": true,
+ "PUBLIC": true,
+ "ALTER": true,
+ "FILE": true,
+ "RAISERROR": true,
+ "AND": true,
+ "FILLFACTOR": true,
+ "READ": true,
+ "ANY": true,
+ "FOR": true,
+ "READTEXT": true,
+ "AS": true,
+ "FOREIGN": true,
+ "RECONFIGURE": true,
+ "ASC": true,
+ "FREETEXT": true,
+ "REFERENCES": true,
+ "AUTHORIZATION": true,
+ "FREETEXTTABLE": true,
+ "REPLICATION": true,
+ "BACKUP": true,
+ "FROM": true,
+ "RESTORE": true,
+ "BEGIN": true,
+ "FULL": true,
+ "RESTRICT": true,
+ "BETWEEN": true,
+ "FUNCTION": true,
+ "RETURN": true,
+ "BREAK": true,
+ "GOTO": true,
+ "REVERT": true,
+ "BROWSE": true,
+ "GRANT": true,
+ "REVOKE": true,
+ "BULK": true,
+ "GROUP": true,
+ "RIGHT": true,
+ "BY": true,
+ "HAVING": true,
+ "ROLLBACK": true,
+ "CASCADE": true,
+ "HOLDLOCK": true,
+ "ROWCOUNT": true,
+ "CASE": true,
+ "IDENTITY": true,
+ "ROWGUIDCOL": true,
+ "CHECK": true,
+ "IDENTITY_INSERT": true,
+ "RULE": true,
+ "CHECKPOINT": true,
+ "IDENTITYCOL": true,
+ "SAVE": true,
+ "CLOSE": true,
+ "IF": true,
+ "SCHEMA": true,
+ "CLUSTERED": true,
+ "IN": true,
+ "SECURITYAUDIT": true,
+ "COALESCE": true,
+ "INDEX": true,
+ "SELECT": true,
+ "COLLATE": true,
+ "INNER": true,
+ "SEMANTICKEYPHRASETABLE": true,
+ "COLUMN": true,
+ "INSERT": true,
+ "SEMANTICSIMILARITYDETAILSTABLE": true,
+ "COMMIT": true,
+ "INTERSECT": true,
+ "SEMANTICSIMILARITYTABLE": true,
+ "COMPUTE": true,
+ "INTO": true,
+ "SESSION_USER": true,
+ "CONSTRAINT": true,
+ "IS": true,
+ "SET": true,
+ "CONTAINS": true,
+ "JOIN": true,
+ "SETUSER": true,
+ "CONTAINSTABLE": true,
+ "KEY": true,
+ "SHUTDOWN": true,
+ "CONTINUE": true,
+ "KILL": true,
+ "SOME": true,
+ "CONVERT": true,
+ "LEFT": true,
+ "STATISTICS": true,
+ "CREATE": true,
+ "LIKE": true,
+ "SYSTEM_USER": true,
+ "CROSS": true,
+ "LINENO": true,
+ "TABLE": true,
+ "CURRENT": true,
+ "LOAD": true,
+ "TABLESAMPLE": true,
+ "CURRENT_DATE": true,
+ "MERGE": true,
+ "TEXTSIZE": true,
+ "CURRENT_TIME": true,
+ "NATIONAL": true,
+ "THEN": true,
+ "CURRENT_TIMESTAMP": true,
+ "NOCHECK": true,
+ "TO": true,
+ "CURRENT_USER": true,
+ "NONCLUSTERED": true,
+ "TOP": true,
+ "CURSOR": true,
+ "NOT": true,
+ "TRAN": true,
+ "DATABASE": true,
+ "NULL": true,
+ "TRANSACTION": true,
+ "DBCC": true,
+ "NULLIF": true,
+ "TRIGGER": true,
+ "DEALLOCATE": true,
+ "OF": true,
+ "TRUNCATE": true,
+ "DECLARE": true,
+ "OFF": true,
+ "TRY_CONVERT": true,
+ "DEFAULT": true,
+ "OFFSETS": true,
+ "TSEQUAL": true,
+ "DELETE": true,
+ "ON": true,
+ "UNION": true,
+ "DENY": true,
+ "OPEN": true,
+ "UNIQUE": true,
+ "DESC": true,
+ "OPENDATASOURCE": true,
+ "UNPIVOT": true,
+ "DISK": true,
+ "OPENQUERY": true,
+ "UPDATE": true,
+ "DISTINCT": true,
+ "OPENROWSET": true,
+ "UPDATETEXT": true,
+ "DISTRIBUTED": true,
+ "OPENXML": true,
+ "USE": true,
+ "DOUBLE": true,
+ "OPTION": true,
+ "USER": true,
+ "DROP": true,
+ "OR": true,
+ "VALUES": true,
+ "DUMP": true,
+ "ORDER": true,
+ "VARYING": true,
+ "ELSE": true,
+ "OUTER": true,
+ "VIEW": true,
+ "END": true,
+ "OVER": true,
+ "WAITFOR": true,
+ "ERRLVL": true,
+ "PERCENT": true,
+ "WHEN": true,
+ "ESCAPE": true,
+ "PIVOT": true,
+ "WHERE": true,
+ "EXCEPT": true,
+ "PLAN": true,
+ "WHILE": true,
+ "EXEC": true,
+ "PRECISION": true,
+ "WITH": true,
+ "EXECUTE": true,
+ "PRIMARY": true,
+ "WITHIN": true,
+ "EXISTS": true,
+ "PRINT": true,
+ "WRITETEXT": true,
+ "EXIT": true,
+ "PROC": true,
+ }
+)
+
+type mssql struct {
+ core.Base
+}
+
+func (db *mssql) Init(d *core.DB, uri *core.Uri, drivername, dataSourceName string) error {
+ return db.Base.Init(d, db, uri, drivername, dataSourceName)
+}
+
+func (db *mssql) SqlType(c *core.Column) string {
+ var res string
+ switch t := c.SQLType.Name; t {
+ case core.Bool:
+ res = core.Bit
+ if strings.EqualFold(c.Default, "true") {
+ c.Default = "1"
+ } else if strings.EqualFold(c.Default, "false") {
+ c.Default = "0"
+ }
+ case core.Serial:
+ c.IsAutoIncrement = true
+ c.IsPrimaryKey = true
+ c.Nullable = false
+ res = core.Int
+ case core.BigSerial:
+ c.IsAutoIncrement = true
+ c.IsPrimaryKey = true
+ c.Nullable = false
+ res = core.BigInt
+ case core.Bytea, core.Blob, core.Binary, core.TinyBlob, core.MediumBlob, core.LongBlob:
+ res = core.VarBinary
+ if c.Length == 0 {
+ c.Length = 50
+ }
+ case core.TimeStamp:
+ res = core.DateTime
+ case core.TimeStampz:
+ res = "DATETIMEOFFSET"
+ c.Length = 7
+ case core.MediumInt:
+ res = core.Int
+ case core.Text, core.MediumText, core.TinyText, core.LongText, core.Json:
+ res = core.Varchar + "(MAX)"
+ case core.Double:
+ res = core.Real
+ case core.Uuid:
+ res = core.Varchar
+ c.Length = 40
+ case core.TinyInt:
+ res = core.TinyInt
+ c.Length = 0
+ default:
+ res = t
+ }
+
+ if res == core.Int {
+ return core.Int
+ }
+
+ hasLen1 := (c.Length > 0)
+ hasLen2 := (c.Length2 > 0)
+
+ if hasLen2 {
+ res += "(" + strconv.Itoa(c.Length) + "," + strconv.Itoa(c.Length2) + ")"
+ } else if hasLen1 {
+ res += "(" + strconv.Itoa(c.Length) + ")"
+ }
+ return res
+}
+
+func (db *mssql) SupportInsertMany() bool {
+ return true
+}
+
+func (db *mssql) IsReserved(name string) bool {
+ _, ok := mssqlReservedWords[name]
+ return ok
+}
+
+func (db *mssql) Quote(name string) string {
+ return "\"" + name + "\""
+}
+
+func (db *mssql) SupportEngine() bool {
+ return false
+}
+
+func (db *mssql) AutoIncrStr() string {
+ return "IDENTITY"
+}
+
+func (db *mssql) DropTableSql(tableName string) string {
+ return fmt.Sprintf("IF EXISTS (SELECT * FROM sysobjects WHERE id = "+
+ "object_id(N'%s') and OBJECTPROPERTY(id, N'IsUserTable') = 1) "+
+ "DROP TABLE \"%s\"", tableName, tableName)
+}
+
+func (db *mssql) SupportCharset() bool {
+ return false
+}
+
+func (db *mssql) IndexOnTable() bool {
+ return true
+}
+
+func (db *mssql) IndexCheckSql(tableName, idxName string) (string, []interface{}) {
+ args := []interface{}{idxName}
+ sql := "select name from sysindexes where id=object_id('" + tableName + "') and name=?"
+ return sql, args
+}
+
+/*func (db *mssql) ColumnCheckSql(tableName, colName string) (string, []interface{}) {
+ args := []interface{}{tableName, colName}
+ sql := `SELECT "COLUMN_NAME" FROM "INFORMATION_SCHEMA"."COLUMNS" WHERE "TABLE_NAME" = ? AND "COLUMN_NAME" = ?`
+ return sql, args
+}*/
+
+func (db *mssql) IsColumnExist(tableName, colName string) (bool, error) {
+ query := `SELECT "COLUMN_NAME" FROM "INFORMATION_SCHEMA"."COLUMNS" WHERE "TABLE_NAME" = ? AND "COLUMN_NAME" = ?`
+
+ return db.HasRecords(query, tableName, colName)
+}
+
+func (db *mssql) TableCheckSql(tableName string) (string, []interface{}) {
+ args := []interface{}{}
+ sql := "select * from sysobjects where id = object_id(N'" + tableName + "') and OBJECTPROPERTY(id, N'IsUserTable') = 1"
+ return sql, args
+}
+
+func (db *mssql) GetColumns(tableName string) ([]string, map[string]*core.Column, error) {
+ args := []interface{}{}
+ s := `select a.name as name, b.name as ctype,a.max_length,a.precision,a.scale,a.is_nullable as nullable,
+ replace(replace(isnull(c.text,''),'(',''),')','') as vdefault,
+ ISNULL(i.is_primary_key, 0)
+ from sys.columns a
+ left join sys.types b on a.user_type_id=b.user_type_id
+ left join sys.syscomments c on a.default_object_id=c.id
+ LEFT OUTER JOIN
+ sys.index_columns ic ON ic.object_id = a.object_id AND ic.column_id = a.column_id
+ LEFT OUTER JOIN
+ sys.indexes i ON ic.object_id = i.object_id AND ic.index_id = i.index_id
+ where a.object_id=object_id('` + tableName + `')`
+ db.LogSQL(s, args)
+
+ rows, err := db.DB().Query(s, args...)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer rows.Close()
+
+ cols := make(map[string]*core.Column)
+ colSeq := make([]string, 0)
+ for rows.Next() {
+ var name, ctype, vdefault string
+ var maxLen, precision, scale int
+ var nullable, isPK bool
+ err = rows.Scan(&name, &ctype, &maxLen, &precision, &scale, &nullable, &vdefault, &isPK)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ col := new(core.Column)
+ col.Indexes = make(map[string]int)
+ col.Name = strings.Trim(name, "` ")
+ col.Nullable = nullable
+ col.Default = vdefault
+ col.IsPrimaryKey = isPK
+ ct := strings.ToUpper(ctype)
+ if ct == "DECIMAL" {
+ col.Length = precision
+ col.Length2 = scale
+ } else {
+ col.Length = maxLen
+ }
+ switch ct {
+ case "DATETIMEOFFSET":
+ col.SQLType = core.SQLType{Name: core.TimeStampz, DefaultLength: 0, DefaultLength2: 0}
+ case "NVARCHAR":
+ col.SQLType = core.SQLType{Name: core.NVarchar, DefaultLength: 0, DefaultLength2: 0}
+ case "IMAGE":
+ col.SQLType = core.SQLType{Name: core.VarBinary, DefaultLength: 0, DefaultLength2: 0}
+ default:
+ if _, ok := core.SqlTypes[ct]; ok {
+ col.SQLType = core.SQLType{Name: ct, DefaultLength: 0, DefaultLength2: 0}
+ } else {
+ return nil, nil, fmt.Errorf("Unknown colType %v for %v - %v", ct, tableName, col.Name)
+ }
+ }
+
+ if col.SQLType.IsText() || col.SQLType.IsTime() {
+ if col.Default != "" {
+ col.Default = "'" + col.Default + "'"
+ } else {
+ if col.DefaultIsEmpty {
+ col.Default = "''"
+ }
+ }
+ }
+ cols[col.Name] = col
+ colSeq = append(colSeq, col.Name)
+ }
+ return colSeq, cols, nil
+}
+
+func (db *mssql) GetTables() ([]*core.Table, error) {
+ args := []interface{}{}
+ s := `select name from sysobjects where xtype ='U'`
+ db.LogSQL(s, args)
+
+ rows, err := db.DB().Query(s, args...)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ tables := make([]*core.Table, 0)
+ for rows.Next() {
+ table := core.NewEmptyTable()
+ var name string
+ err = rows.Scan(&name)
+ if err != nil {
+ return nil, err
+ }
+ table.Name = strings.Trim(name, "` ")
+ tables = append(tables, table)
+ }
+ return tables, nil
+}
+
+func (db *mssql) GetIndexes(tableName string) (map[string]*core.Index, error) {
+ args := []interface{}{tableName}
+ s := `SELECT
+IXS.NAME AS [INDEX_NAME],
+C.NAME AS [COLUMN_NAME],
+IXS.is_unique AS [IS_UNIQUE]
+FROM SYS.INDEXES IXS
+INNER JOIN SYS.INDEX_COLUMNS IXCS
+ON IXS.OBJECT_ID=IXCS.OBJECT_ID AND IXS.INDEX_ID = IXCS.INDEX_ID
+INNER JOIN SYS.COLUMNS C ON IXS.OBJECT_ID=C.OBJECT_ID
+AND IXCS.COLUMN_ID=C.COLUMN_ID
+WHERE IXS.TYPE_DESC='NONCLUSTERED' and OBJECT_NAME(IXS.OBJECT_ID) =?
+`
+ db.LogSQL(s, args)
+
+ rows, err := db.DB().Query(s, args...)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ indexes := make(map[string]*core.Index, 0)
+ for rows.Next() {
+ var indexType int
+ var indexName, colName, isUnique string
+
+ err = rows.Scan(&indexName, &colName, &isUnique)
+ if err != nil {
+ return nil, err
+ }
+
+ i, err := strconv.ParseBool(isUnique)
+ if err != nil {
+ return nil, err
+ }
+
+ if i {
+ indexType = core.UniqueType
+ } else {
+ indexType = core.IndexType
+ }
+
+ colName = strings.Trim(colName, "` ")
+ var isRegular bool
+ if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) {
+ indexName = indexName[5+len(tableName):]
+ isRegular = true
+ }
+
+ var index *core.Index
+ var ok bool
+ if index, ok = indexes[indexName]; !ok {
+ index = new(core.Index)
+ index.Type = indexType
+ index.Name = indexName
+ index.IsRegular = isRegular
+ indexes[indexName] = index
+ }
+ index.AddColumn(colName)
+ }
+ return indexes, nil
+}
+
+func (db *mssql) CreateTableSql(table *core.Table, tableName, storeEngine, charset string) string {
+ var sql string
+ if tableName == "" {
+ tableName = table.Name
+ }
+
+ sql = "IF NOT EXISTS (SELECT [name] FROM sys.tables WHERE [name] = '" + tableName + "' ) CREATE TABLE "
+
+ sql += db.Quote(tableName) + " ("
+
+ pkList := table.PrimaryKeys
+
+ for _, colName := range table.ColumnsSeq() {
+ col := table.GetColumn(colName)
+ if col.IsPrimaryKey && len(pkList) == 1 {
+ sql += col.String(db)
+ } else {
+ sql += col.StringNoPk(db)
+ }
+ sql = strings.TrimSpace(sql)
+ sql += ", "
+ }
+
+ if len(pkList) > 1 {
+ sql += "PRIMARY KEY ( "
+ sql += strings.Join(pkList, ",")
+ sql += " ), "
+ }
+
+ sql = sql[:len(sql)-2] + ")"
+ sql += ";"
+ return sql
+}
+
+func (db *mssql) ForUpdateSql(query string) string {
+ return query
+}
+
+func (db *mssql) Filters() []core.Filter {
+ return []core.Filter{&core.IdFilter{}, &core.QuoteFilter{}}
+}
+
+type odbcDriver struct {
+}
+
+func (p *odbcDriver) Parse(driverName, dataSourceName string) (*core.Uri, error) {
+ var dbName string
+
+ if strings.HasPrefix(dataSourceName, "sqlserver://") {
+ u, err := url.Parse(dataSourceName)
+ if err != nil {
+ return nil, err
+ }
+ dbName = u.Query().Get("database")
+ } else {
+ kv := strings.Split(dataSourceName, ";")
+ for _, c := range kv {
+ vv := strings.Split(strings.TrimSpace(c), "=")
+ if len(vv) == 2 {
+ switch strings.ToLower(vv[0]) {
+ case "database":
+ dbName = vv[1]
+ }
+ }
+ }
+ }
+ if dbName == "" {
+ return nil, errors.New("no db name provided")
+ }
+ return &core.Uri{DbName: dbName, DbType: core.MSSQL}, nil
+}
diff --git a/vendor/github.com/go-xorm/xorm/dialect_mysql.go b/vendor/github.com/go-xorm/xorm/dialect_mysql.go
new file mode 100644
index 0000000..a108b81
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/dialect_mysql.go
@@ -0,0 +1,656 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "xorm.io/core"
+)
+
+var (
+ mysqlReservedWords = map[string]bool{
+ "ADD": true,
+ "ALL": true,
+ "ALTER": true,
+ "ANALYZE": true,
+ "AND": true,
+ "AS": true,
+ "ASC": true,
+ "ASENSITIVE": true,
+ "BEFORE": true,
+ "BETWEEN": true,
+ "BIGINT": true,
+ "BINARY": true,
+ "BLOB": true,
+ "BOTH": true,
+ "BY": true,
+ "CALL": true,
+ "CASCADE": true,
+ "CASE": true,
+ "CHANGE": true,
+ "CHAR": true,
+ "CHARACTER": true,
+ "CHECK": true,
+ "COLLATE": true,
+ "COLUMN": true,
+ "CONDITION": true,
+ "CONNECTION": true,
+ "CONSTRAINT": true,
+ "CONTINUE": true,
+ "CONVERT": true,
+ "CREATE": true,
+ "CROSS": true,
+ "CURRENT_DATE": true,
+ "CURRENT_TIME": true,
+ "CURRENT_TIMESTAMP": true,
+ "CURRENT_USER": true,
+ "CURSOR": true,
+ "DATABASE": true,
+ "DATABASES": true,
+ "DAY_HOUR": true,
+ "DAY_MICROSECOND": true,
+ "DAY_MINUTE": true,
+ "DAY_SECOND": true,
+ "DEC": true,
+ "DECIMAL": true,
+ "DECLARE": true,
+ "DEFAULT": true,
+ "DELAYED": true,
+ "DELETE": true,
+ "DESC": true,
+ "DESCRIBE": true,
+ "DETERMINISTIC": true,
+ "DISTINCT": true,
+ "DISTINCTROW": true,
+ "DIV": true,
+ "DOUBLE": true,
+ "DROP": true,
+ "DUAL": true,
+ "EACH": true,
+ "ELSE": true,
+ "ELSEIF": true,
+ "ENCLOSED": true,
+ "ESCAPED": true,
+ "EXISTS": true,
+ "EXIT": true,
+ "EXPLAIN": true,
+ "FALSE": true,
+ "FETCH": true,
+ "FLOAT": true,
+ "FLOAT4": true,
+ "FLOAT8": true,
+ "FOR": true,
+ "FORCE": true,
+ "FOREIGN": true,
+ "FROM": true,
+ "FULLTEXT": true,
+ "GOTO": true,
+ "GRANT": true,
+ "GROUP": true,
+ "HAVING": true,
+ "HIGH_PRIORITY": true,
+ "HOUR_MICROSECOND": true,
+ "HOUR_MINUTE": true,
+ "HOUR_SECOND": true,
+ "IF": true,
+ "IGNORE": true,
+ "IN": true, "INDEX": true,
+ "INFILE": true, "INNER": true, "INOUT": true,
+ "INSENSITIVE": true, "INSERT": true, "INT": true,
+ "INT1": true, "INT2": true, "INT3": true,
+ "INT4": true, "INT8": true, "INTEGER": true,
+ "INTERVAL": true, "INTO": true, "IS": true,
+ "ITERATE": true, "JOIN": true, "KEY": true,
+ "KEYS": true, "KILL": true, "LABEL": true,
+ "LEADING": true, "LEAVE": true, "LEFT": true,
+ "LIKE": true, "LIMIT": true, "LINEAR": true,
+ "LINES": true, "LOAD": true, "LOCALTIME": true,
+ "LOCALTIMESTAMP": true, "LOCK": true, "LONG": true,
+ "LONGBLOB": true, "LONGTEXT": true, "LOOP": true,
+ "LOW_PRIORITY": true, "MATCH": true, "MEDIUMBLOB": true,
+ "MEDIUMINT": true, "MEDIUMTEXT": true, "MIDDLEINT": true,
+ "MINUTE_MICROSECOND": true, "MINUTE_SECOND": true, "MOD": true,
+ "MODIFIES": true, "NATURAL": true, "NOT": true,
+ "NO_WRITE_TO_BINLOG": true, "NULL": true, "NUMERIC": true,
+ "ON OPTIMIZE": true, "OPTION": true,
+ "OPTIONALLY": true, "OR": true, "ORDER": true,
+ "OUT": true, "OUTER": true, "OUTFILE": true,
+ "PRECISION": true, "PRIMARY": true, "PROCEDURE": true,
+ "PURGE": true, "RAID0": true, "RANGE": true,
+ "READ": true, "READS": true, "REAL": true,
+ "REFERENCES": true, "REGEXP": true, "RELEASE": true,
+ "RENAME": true, "REPEAT": true, "REPLACE": true,
+ "REQUIRE": true, "RESTRICT": true, "RETURN": true,
+ "REVOKE": true, "RIGHT": true, "RLIKE": true,
+ "SCHEMA": true, "SCHEMAS": true, "SECOND_MICROSECOND": true,
+ "SELECT": true, "SENSITIVE": true, "SEPARATOR": true,
+ "SET": true, "SHOW": true, "SMALLINT": true,
+ "SPATIAL": true, "SPECIFIC": true, "SQL": true,
+ "SQLEXCEPTION": true, "SQLSTATE": true, "SQLWARNING": true,
+ "SQL_BIG_RESULT": true, "SQL_CALC_FOUND_ROWS": true, "SQL_SMALL_RESULT": true,
+ "SSL": true, "STARTING": true, "STRAIGHT_JOIN": true,
+ "TABLE": true, "TERMINATED": true, "THEN": true,
+ "TINYBLOB": true, "TINYINT": true, "TINYTEXT": true,
+ "TO": true, "TRAILING": true, "TRIGGER": true,
+ "TRUE": true, "UNDO": true, "UNION": true,
+ "UNIQUE": true, "UNLOCK": true, "UNSIGNED": true,
+ "UPDATE": true, "USAGE": true, "USE": true,
+ "USING": true, "UTC_DATE": true, "UTC_TIME": true,
+ "UTC_TIMESTAMP": true, "VALUES": true, "VARBINARY": true,
+ "VARCHAR": true,
+ "VARCHARACTER": true,
+ "VARYING": true,
+ "WHEN": true,
+ "WHERE": true,
+ "WHILE": true,
+ "WITH": true,
+ "WRITE": true,
+ "X509": true,
+ "XOR": true,
+ "YEAR_MONTH": true,
+ "ZEROFILL": true,
+ }
+)
+
+type mysql struct {
+ core.Base
+ net string
+ addr string
+ params map[string]string
+ loc *time.Location
+ timeout time.Duration
+ tls *tls.Config
+ allowAllFiles bool
+ allowOldPasswords bool
+ clientFoundRows bool
+ rowFormat string
+}
+
+func (db *mysql) Init(d *core.DB, uri *core.Uri, drivername, dataSourceName string) error {
+ return db.Base.Init(d, db, uri, drivername, dataSourceName)
+}
+
+// SetParams consumes dialect-specific options. Only "rowFormat" is
+// recognized; a valid value is stored upper-cased and later emitted as
+// ROW_FORMAT=... by CreateTableSql. Invalid values are silently ignored.
+func (db *mysql) SetParams(params map[string]string) {
+	rowFormat, ok := params["rowFormat"]
+	if ok {
+		var t = strings.ToUpper(rowFormat)
+		// NOTE(review): the break statements below are no-ops (Go switch
+		// cases do not fall through); kept byte-identical to upstream.
+		switch t {
+		case "COMPACT":
+			fallthrough
+		case "REDUNDANT":
+			fallthrough
+		case "DYNAMIC":
+			fallthrough
+		case "COMPRESSED":
+			db.rowFormat = t
+			break
+		default:
+			break
+		}
+	}
+}
+
+// SqlType maps a core column definition to its MySQL column type string,
+// mutating column metadata (length, auto-increment, primary key) for types
+// MySQL models differently (Bool, Serial, Enum/Set, Uuid, Json, ...).
+func (db *mysql) SqlType(c *core.Column) string {
+	var res string
+	switch t := c.SQLType.Name; t {
+	case core.Bool:
+		// MySQL has no dedicated boolean storage type; use TINYINT(1).
+		res = core.TinyInt
+		c.Length = 1
+	case core.Serial:
+		// Serial is modelled as an auto-increment INT primary key.
+		c.IsAutoIncrement = true
+		c.IsPrimaryKey = true
+		c.Nullable = false
+		res = core.Int
+	case core.BigSerial:
+		c.IsAutoIncrement = true
+		c.IsPrimaryKey = true
+		c.Nullable = false
+		res = core.BigInt
+	case core.Bytea:
+		res = core.Blob
+	case core.TimeStampz:
+		// Zoned timestamps are stored as a fixed 64-char string.
+		res = core.Char
+		c.Length = 64
+	case core.Enum: // mysql enum
+		// Emit ENUM('a','b',...); map iteration order is random but
+		// the option order does not affect MySQL semantics here.
+		res = core.Enum
+		res += "("
+		opts := ""
+		for v := range c.EnumOptions {
+			opts += fmt.Sprintf(",'%v'", v)
+		}
+		res += strings.TrimLeft(opts, ",")
+		res += ")"
+	case core.Set: // mysql set
+		res = core.Set
+		res += "("
+		opts := ""
+		for v := range c.SetOptions {
+			opts += fmt.Sprintf(",'%v'", v)
+		}
+		res += strings.TrimLeft(opts, ",")
+		res += ")"
+	case core.NVarchar:
+		res = core.Varchar
+	case core.Uuid:
+		res = core.Varchar
+		c.Length = 40
+	case core.Json:
+		res = core.Text
+	default:
+		res = t
+	}
+
+	hasLen1 := (c.Length > 0)
+	hasLen2 := (c.Length2 > 0)
+
+	// BIGINT defaults to display width 20 when no length was given.
+	if res == core.BigInt && !hasLen1 && !hasLen2 {
+		c.Length = 20
+		hasLen1 = true
+	}
+
+	if hasLen2 {
+		res += "(" + strconv.Itoa(c.Length) + "," + strconv.Itoa(c.Length2) + ")"
+	} else if hasLen1 {
+		res += "(" + strconv.Itoa(c.Length) + ")"
+	}
+	return res
+}
+
+func (db *mysql) SupportInsertMany() bool {
+ return true
+}
+
+func (db *mysql) IsReserved(name string) bool {
+ _, ok := mysqlReservedWords[name]
+ return ok
+}
+
+func (db *mysql) Quote(name string) string {
+ return "`" + name + "`"
+}
+
+func (db *mysql) SupportEngine() bool {
+ return true
+}
+
+func (db *mysql) AutoIncrStr() string {
+ return "AUTO_INCREMENT"
+}
+
+func (db *mysql) SupportCharset() bool {
+ return true
+}
+
+func (db *mysql) IndexOnTable() bool {
+ return true
+}
+
+func (db *mysql) IndexCheckSql(tableName, idxName string) (string, []interface{}) {
+ args := []interface{}{db.DbName, tableName, idxName}
+ sql := "SELECT `INDEX_NAME` FROM `INFORMATION_SCHEMA`.`STATISTICS`"
+ sql += " WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ? AND `INDEX_NAME`=?"
+ return sql, args
+}
+
+/*func (db *mysql) ColumnCheckSql(tableName, colName string) (string, []interface{}) {
+ args := []interface{}{db.DbName, tableName, colName}
+ sql := "SELECT `COLUMN_NAME` FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ? AND `COLUMN_NAME` = ?"
+ return sql, args
+}*/
+
+func (db *mysql) TableCheckSql(tableName string) (string, []interface{}) {
+ args := []interface{}{db.DbName, tableName}
+ sql := "SELECT `TABLE_NAME` from `INFORMATION_SCHEMA`.`TABLES` WHERE `TABLE_SCHEMA`=? and `TABLE_NAME`=?"
+ return sql, args
+}
+
+// GetColumns reads column metadata for tableName from
+// INFORMATION_SCHEMA.COLUMNS and returns the column order plus a map of
+// parsed *core.Column values keyed by column name.
+func (db *mysql) GetColumns(tableName string) ([]string, map[string]*core.Column, error) {
+	args := []interface{}{db.DbName, tableName}
+	s := "SELECT `COLUMN_NAME`, `IS_NULLABLE`, `COLUMN_DEFAULT`, `COLUMN_TYPE`," +
+		" `COLUMN_KEY`, `EXTRA`,`COLUMN_COMMENT` FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ?"
+	db.LogSQL(s, args)
+
+	rows, err := db.DB().Query(s, args...)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer rows.Close()
+
+	cols := make(map[string]*core.Column)
+	colSeq := make([]string, 0)
+	for rows.Next() {
+		col := new(core.Column)
+		col.Indexes = make(map[string]int)
+
+		// COLUMN_DEFAULT is scanned into a *string because it is NULL
+		// for columns without a default.
+		var columnName, isNullable, colType, colKey, extra, comment string
+		var colDefault *string
+		err = rows.Scan(&columnName, &isNullable, &colDefault, &colType, &colKey, &extra, &comment)
+		if err != nil {
+			return nil, nil, err
+		}
+		col.Name = strings.Trim(columnName, "` ")
+		col.Comment = comment
+		if "YES" == isNullable {
+			col.Nullable = true
+		}
+
+		if colDefault != nil {
+			col.Default = *colDefault
+			if col.Default == "" {
+				col.DefaultIsEmpty = true
+			}
+		}
+
+		// COLUMN_TYPE looks like "int(11)", "enum('a','b')" or
+		// "decimal(10,2)": split the base type from the parenthesized part.
+		cts := strings.Split(colType, "(")
+		colName := cts[0]
+		colType = strings.ToUpper(colName)
+		var len1, len2 int
+		if len(cts) == 2 {
+			idx := strings.Index(cts[1], ")")
+			if colType == core.Enum && cts[1][0] == '\'' { // enum
+				// Parse enum options "'a','b'" into a value->index map.
+				options := strings.Split(cts[1][0:idx], ",")
+				col.EnumOptions = make(map[string]int)
+				for k, v := range options {
+					v = strings.TrimSpace(v)
+					v = strings.Trim(v, "'")
+					col.EnumOptions[v] = k
+				}
+			} else if colType == core.Set && cts[1][0] == '\'' {
+				options := strings.Split(cts[1][0:idx], ",")
+				col.SetOptions = make(map[string]int)
+				for k, v := range options {
+					v = strings.TrimSpace(v)
+					v = strings.Trim(v, "'")
+					col.SetOptions[v] = k
+				}
+			} else {
+				// Numeric length(s): "(len)" or "(len1,len2)".
+				lens := strings.Split(cts[1][0:idx], ",")
+				len1, err = strconv.Atoi(strings.TrimSpace(lens[0]))
+				if err != nil {
+					return nil, nil, err
+				}
+				if len(lens) == 2 {
+					len2, err = strconv.Atoi(lens[1])
+					if err != nil {
+						return nil, nil, err
+					}
+				}
+			}
+		}
+		// Normalize unsigned float/double to their signed base types,
+		// which is what core.SqlTypes knows about.
+		if colType == "FLOAT UNSIGNED" {
+			colType = "FLOAT"
+		}
+		if colType == "DOUBLE UNSIGNED" {
+			colType = "DOUBLE"
+		}
+		col.Length = len1
+		col.Length2 = len2
+		if _, ok := core.SqlTypes[colType]; ok {
+			col.SQLType = core.SQLType{Name: colType, DefaultLength: len1, DefaultLength2: len2}
+		} else {
+			return nil, nil, fmt.Errorf("Unknown colType %v", colType)
+		}
+
+		if colKey == "PRI" {
+			col.IsPrimaryKey = true
+		}
+		if colKey == "UNI" {
+			// col.is
+		}
+
+		if extra == "auto_increment" {
+			col.IsAutoIncrement = true
+		}
+
+		// Text and time defaults must be single-quoted when re-emitted
+		// into DDL; an empty default becomes the literal ''.
+		if col.SQLType.IsText() || col.SQLType.IsTime() {
+			if col.Default != "" {
+				col.Default = "'" + col.Default + "'"
+			} else {
+				if col.DefaultIsEmpty {
+					col.Default = "''"
+				}
+			}
+		}
+		cols[col.Name] = col
+		colSeq = append(colSeq, col.Name)
+	}
+	return colSeq, cols, nil
+}
+
+func (db *mysql) GetTables() ([]*core.Table, error) {
+ args := []interface{}{db.DbName}
+ s := "SELECT `TABLE_NAME`, `ENGINE`, `TABLE_ROWS`, `AUTO_INCREMENT`, `TABLE_COMMENT` from " +
+ "`INFORMATION_SCHEMA`.`TABLES` WHERE `TABLE_SCHEMA`=? AND (`ENGINE`='MyISAM' OR `ENGINE` = 'InnoDB' OR `ENGINE` = 'TokuDB')"
+ db.LogSQL(s, args)
+
+ rows, err := db.DB().Query(s, args...)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ tables := make([]*core.Table, 0)
+ for rows.Next() {
+ table := core.NewEmptyTable()
+ var name, engine, tableRows, comment string
+ var autoIncr *string
+ err = rows.Scan(&name, &engine, &tableRows, &autoIncr, &comment)
+ if err != nil {
+ return nil, err
+ }
+
+ table.Name = name
+ table.Comment = comment
+ table.StoreEngine = engine
+ tables = append(tables, table)
+ }
+ return tables, nil
+}
+
+// GetIndexes reads the secondary indexes of tableName from
+// INFORMATION_SCHEMA.STATISTICS, keyed by (possibly prefix-stripped)
+// index name. The PRIMARY index is excluded.
+func (db *mysql) GetIndexes(tableName string) (map[string]*core.Index, error) {
+	args := []interface{}{db.DbName, tableName}
+	s := "SELECT `INDEX_NAME`, `NON_UNIQUE`, `COLUMN_NAME` FROM `INFORMATION_SCHEMA`.`STATISTICS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ?"
+	db.LogSQL(s, args)
+
+	rows, err := db.DB().Query(s, args...)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	indexes := make(map[string]*core.Index, 0)
+	for rows.Next() {
+		var indexType int
+		var indexName, colName, nonUnique string
+		err = rows.Scan(&indexName, &nonUnique, &colName)
+		if err != nil {
+			return nil, err
+		}
+
+		if indexName == "PRIMARY" {
+			continue
+		}
+
+		// NON_UNIQUE is scanned as a string; "1" (or "YES") means a
+		// plain index, anything else a unique index.
+		if "YES" == nonUnique || nonUnique == "1" {
+			indexType = core.IndexType
+		} else {
+			indexType = core.UniqueType
+		}
+
+		colName = strings.Trim(colName, "` ")
+		// Names of the form IDX_<table>_<name> / UQE_<table>_<name> were
+		// generated by xorm ("regular"); strip "IDX_<table>_" — 4 chars
+		// of prefix, the table name, and the joining underscore.
+		var isRegular bool
+		if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) {
+			indexName = indexName[5+len(tableName):]
+			isRegular = true
+		}
+
+		// Multi-column indexes appear as one row per column; reuse the
+		// existing entry and just append the column.
+		var index *core.Index
+		var ok bool
+		if index, ok = indexes[indexName]; !ok {
+			index = new(core.Index)
+			index.IsRegular = isRegular
+			index.Type = indexType
+			index.Name = indexName
+			indexes[indexName] = index
+		}
+		index.AddColumn(colName)
+	}
+	return indexes, nil
+}
+
+// CreateTableSql builds a MySQL CREATE TABLE IF NOT EXISTS statement,
+// including column comments, a composite PRIMARY KEY clause when needed,
+// and optional ENGINE / DEFAULT CHARSET / ROW_FORMAT suffixes.
+func (db *mysql) CreateTableSql(table *core.Table, tableName, storeEngine, charset string) string {
+	var sql string
+	sql = "CREATE TABLE IF NOT EXISTS "
+	if tableName == "" {
+		tableName = table.Name
+	}
+
+	sql += db.Quote(tableName)
+	sql += " ("
+
+	if len(table.ColumnsSeq()) > 0 {
+		pkList := table.PrimaryKeys
+
+		for _, colName := range table.ColumnsSeq() {
+			col := table.GetColumn(colName)
+			// Single-column primary keys are declared inline; composite
+			// keys are emitted as a PRIMARY KEY clause below.
+			if col.IsPrimaryKey && len(pkList) == 1 {
+				sql += col.String(db)
+			} else {
+				sql += col.StringNoPk(db)
+			}
+			sql = strings.TrimSpace(sql)
+			if len(col.Comment) > 0 {
+				sql += " COMMENT '" + col.Comment + "'"
+			}
+			sql += ", "
+		}
+
+		if len(pkList) > 1 {
+			// Quote(",") joins the names as `a`,`b` with outer backticks.
+			sql += "PRIMARY KEY ( "
+			sql += db.Quote(strings.Join(pkList, db.Quote(",")))
+			sql += " ), "
+		}
+
+		// Trim the trailing ", ".
+		sql = sql[:len(sql)-2]
+	}
+	sql += ")"
+
+	if storeEngine != "" {
+		sql += " ENGINE=" + storeEngine
+	}
+
+	// Fall back to the connection URI's charset when none was passed.
+	if len(charset) == 0 {
+		charset = db.URI().Charset
+	}
+	if len(charset) != 0 {
+		sql += " DEFAULT CHARSET " + charset
+	}
+
+	// rowFormat is only set via SetParams("rowFormat") with a valid value.
+	if db.rowFormat != "" {
+		sql += " ROW_FORMAT=" + db.rowFormat
+	}
+	return sql
+}
+
+func (db *mysql) Filters() []core.Filter {
+ return []core.Filter{&core.IdFilter{}}
+}
+
+type mymysqlDriver struct {
+}
+
+// Parse decodes a mymysql-style DSN of the form
+// [proto:addr[,opt=val...]*]dbname/user/password into a *core.Uri.
+func (p *mymysqlDriver) Parse(driverName, dataSourceName string) (*core.Uri, error) {
+	db := &core.Uri{DbType: core.MYSQL}
+
+	pd := strings.SplitN(dataSourceName, "*", 2)
+	if len(pd) == 2 {
+		// Parse protocol part of URI: "proto:raddr[,opt=val...]".
+		p := strings.SplitN(pd[0], ":", 2)
+		if len(p) != 2 {
+			return nil, errors.New("Wrong protocol part of URI")
+		}
+		db.Proto = p[0]
+		options := strings.Split(p[1], ",")
+		db.Raddr = options[0]
+		for _, o := range options[1:] {
+			kv := strings.SplitN(o, "=", 2)
+			var k, v string
+			if len(kv) == 2 {
+				k, v = kv[0], kv[1]
+			} else {
+				// A bare option name is treated as a boolean flag.
+				k, v = o, "true"
+			}
+			switch k {
+			case "laddr":
+				db.Laddr = v
+			case "timeout":
+				to, err := time.ParseDuration(v)
+				if err != nil {
+					return nil, err
+				}
+				db.Timeout = to
+			default:
+				return nil, errors.New("Unknown option: " + k)
+			}
+		}
+		// Remove protocol part
+		pd = pd[1:]
+	}
+	// Parse database part of URI: "dbname/user/password".
+	dup := strings.SplitN(pd[0], "/", 3)
+	if len(dup) != 3 {
+		return nil, errors.New("Wrong database part of URI")
+	}
+	db.DbName = dup[0]
+	db.User = dup[1]
+	db.Passwd = dup[2]
+
+	return db, nil
+}
+
+type mysqlDriver struct {
+}
+
+// Parse extracts the database name and charset from a go-sql-driver/mysql
+// DSN of the form [user[:password]@][net[(addr)]]/dbname[?param=value&...].
+// Only DbName and Charset are populated on the returned *core.Uri; the
+// other DSN components are matched but intentionally ignored here.
+func (p *mysqlDriver) Parse(driverName, dataSourceName string) (*core.Uri, error) {
+	// Named capture groups are required: SubexpNames() below is matched
+	// against "dbname" and "params" to pick submatches out by name.
+	// (The names had been stripped to the invalid form `(?P.*?)`, which
+	// would make MustCompile panic; restored per upstream xorm.)
+	dsnPattern := regexp.MustCompile(
+		`^(?:(?P<user>.*?)(?::(?P<passwd>.*))?@)?` + // [user[:password]@]
+			`(?:(?P<net>[^\(]*)(?:\((?P<addr>[^\)]*)\))?)?` + // [net[(addr)]]
+			`\/(?P<dbname>.*?)` + // /dbname
+			`(?:\?(?P<params>[^\?]*))?$`) // [?param1=value1&paramN=valueN]
+	matches := dsnPattern.FindStringSubmatch(dataSourceName)
+	names := dsnPattern.SubexpNames()
+
+	uri := &core.Uri{DbType: core.MYSQL}
+
+	// matches is nil when the DSN does not match; ranging over nil is a
+	// no-op and an empty uri is returned.
+	for i, match := range matches {
+		switch names[i] {
+		case "dbname":
+			uri.DbName = match
+		case "params":
+			if len(match) > 0 {
+				// Only the charset query parameter is honored; other
+				// parameters are left for the driver itself.
+				kvs := strings.Split(match, "&")
+				for _, kv := range kvs {
+					splits := strings.Split(kv, "=")
+					if len(splits) == 2 {
+						switch splits[0] {
+						case "charset":
+							uri.Charset = splits[1]
+						}
+					}
+				}
+			}
+
+		}
+	}
+	return uri, nil
+}
diff --git a/vendor/github.com/go-xorm/xorm/dialect_oracle.go b/vendor/github.com/go-xorm/xorm/dialect_oracle.go
new file mode 100644
index 0000000..15010ca
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/dialect_oracle.go
@@ -0,0 +1,902 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "xorm.io/core"
+)
+
+var (
+ oracleReservedWords = map[string]bool{
+ "ACCESS": true,
+ "ACCOUNT": true,
+ "ACTIVATE": true,
+ "ADD": true,
+ "ADMIN": true,
+ "ADVISE": true,
+ "AFTER": true,
+ "ALL": true,
+ "ALL_ROWS": true,
+ "ALLOCATE": true,
+ "ALTER": true,
+ "ANALYZE": true,
+ "AND": true,
+ "ANY": true,
+ "ARCHIVE": true,
+ "ARCHIVELOG": true,
+ "ARRAY": true,
+ "AS": true,
+ "ASC": true,
+ "AT": true,
+ "AUDIT": true,
+ "AUTHENTICATED": true,
+ "AUTHORIZATION": true,
+ "AUTOEXTEND": true,
+ "AUTOMATIC": true,
+ "BACKUP": true,
+ "BECOME": true,
+ "BEFORE": true,
+ "BEGIN": true,
+ "BETWEEN": true,
+ "BFILE": true,
+ "BITMAP": true,
+ "BLOB": true,
+ "BLOCK": true,
+ "BODY": true,
+ "BY": true,
+ "CACHE": true,
+ "CACHE_INSTANCES": true,
+ "CANCEL": true,
+ "CASCADE": true,
+ "CAST": true,
+ "CFILE": true,
+ "CHAINED": true,
+ "CHANGE": true,
+ "CHAR": true,
+ "CHAR_CS": true,
+ "CHARACTER": true,
+ "CHECK": true,
+ "CHECKPOINT": true,
+ "CHOOSE": true,
+ "CHUNK": true,
+ "CLEAR": true,
+ "CLOB": true,
+ "CLONE": true,
+ "CLOSE": true,
+ "CLOSE_CACHED_OPEN_CURSORS": true,
+ "CLUSTER": true,
+ "COALESCE": true,
+ "COLUMN": true,
+ "COLUMNS": true,
+ "COMMENT": true,
+ "COMMIT": true,
+ "COMMITTED": true,
+ "COMPATIBILITY": true,
+ "COMPILE": true,
+ "COMPLETE": true,
+ "COMPOSITE_LIMIT": true,
+ "COMPRESS": true,
+ "COMPUTE": true,
+ "CONNECT": true,
+ "CONNECT_TIME": true,
+ "CONSTRAINT": true,
+ "CONSTRAINTS": true,
+ "CONTENTS": true,
+ "CONTINUE": true,
+ "CONTROLFILE": true,
+ "CONVERT": true,
+ "COST": true,
+ "CPU_PER_CALL": true,
+ "CPU_PER_SESSION": true,
+ "CREATE": true,
+ "CURRENT": true,
+ "CURRENT_SCHEMA": true,
+ "CURREN_USER": true,
+ "CURSOR": true,
+ "CYCLE": true,
+ "DANGLING": true,
+ "DATABASE": true,
+ "DATAFILE": true,
+ "DATAFILES": true,
+ "DATAOBJNO": true,
+ "DATE": true,
+ "DBA": true,
+ "DBHIGH": true,
+ "DBLOW": true,
+ "DBMAC": true,
+ "DEALLOCATE": true,
+ "DEBUG": true,
+ "DEC": true,
+ "DECIMAL": true,
+ "DECLARE": true,
+ "DEFAULT": true,
+ "DEFERRABLE": true,
+ "DEFERRED": true,
+ "DEGREE": true,
+ "DELETE": true,
+ "DEREF": true,
+ "DESC": true,
+ "DIRECTORY": true,
+ "DISABLE": true,
+ "DISCONNECT": true,
+ "DISMOUNT": true,
+ "DISTINCT": true,
+ "DISTRIBUTED": true,
+ "DML": true,
+ "DOUBLE": true,
+ "DROP": true,
+ "DUMP": true,
+ "EACH": true,
+ "ELSE": true,
+ "ENABLE": true,
+ "END": true,
+ "ENFORCE": true,
+ "ENTRY": true,
+ "ESCAPE": true,
+ "EXCEPT": true,
+ "EXCEPTIONS": true,
+ "EXCHANGE": true,
+ "EXCLUDING": true,
+ "EXCLUSIVE": true,
+ "EXECUTE": true,
+ "EXISTS": true,
+ "EXPIRE": true,
+ "EXPLAIN": true,
+ "EXTENT": true,
+ "EXTENTS": true,
+ "EXTERNALLY": true,
+ "FAILED_LOGIN_ATTEMPTS": true,
+ "FALSE": true,
+ "FAST": true,
+ "FILE": true,
+ "FIRST_ROWS": true,
+ "FLAGGER": true,
+ "FLOAT": true,
+ "FLOB": true,
+ "FLUSH": true,
+ "FOR": true,
+ "FORCE": true,
+ "FOREIGN": true,
+ "FREELIST": true,
+ "FREELISTS": true,
+ "FROM": true,
+ "FULL": true,
+ "FUNCTION": true,
+ "GLOBAL": true,
+ "GLOBALLY": true,
+ "GLOBAL_NAME": true,
+ "GRANT": true,
+ "GROUP": true,
+ "GROUPS": true,
+ "HASH": true,
+ "HASHKEYS": true,
+ "HAVING": true,
+ "HEADER": true,
+ "HEAP": true,
+ "IDENTIFIED": true,
+ "IDGENERATORS": true,
+ "IDLE_TIME": true,
+ "IF": true,
+ "IMMEDIATE": true,
+ "IN": true,
+ "INCLUDING": true,
+ "INCREMENT": true,
+ "INDEX": true,
+ "INDEXED": true,
+ "INDEXES": true,
+ "INDICATOR": true,
+ "IND_PARTITION": true,
+ "INITIAL": true,
+ "INITIALLY": true,
+ "INITRANS": true,
+ "INSERT": true,
+ "INSTANCE": true,
+ "INSTANCES": true,
+ "INSTEAD": true,
+ "INT": true,
+ "INTEGER": true,
+ "INTERMEDIATE": true,
+ "INTERSECT": true,
+ "INTO": true,
+ "IS": true,
+ "ISOLATION": true,
+ "ISOLATION_LEVEL": true,
+ "KEEP": true,
+ "KEY": true,
+ "KILL": true,
+ "LABEL": true,
+ "LAYER": true,
+ "LESS": true,
+ "LEVEL": true,
+ "LIBRARY": true,
+ "LIKE": true,
+ "LIMIT": true,
+ "LINK": true,
+ "LIST": true,
+ "LOB": true,
+ "LOCAL": true,
+ "LOCK": true,
+ "LOCKED": true,
+ "LOG": true,
+ "LOGFILE": true,
+ "LOGGING": true,
+ "LOGICAL_READS_PER_CALL": true,
+ "LOGICAL_READS_PER_SESSION": true,
+ "LONG": true,
+ "MANAGE": true,
+ "MASTER": true,
+ "MAX": true,
+ "MAXARCHLOGS": true,
+ "MAXDATAFILES": true,
+ "MAXEXTENTS": true,
+ "MAXINSTANCES": true,
+ "MAXLOGFILES": true,
+ "MAXLOGHISTORY": true,
+ "MAXLOGMEMBERS": true,
+ "MAXSIZE": true,
+ "MAXTRANS": true,
+ "MAXVALUE": true,
+ "MIN": true,
+ "MEMBER": true,
+ "MINIMUM": true,
+ "MINEXTENTS": true,
+ "MINUS": true,
+ "MINVALUE": true,
+ "MLSLABEL": true,
+ "MLS_LABEL_FORMAT": true,
+ "MODE": true,
+ "MODIFY": true,
+ "MOUNT": true,
+ "MOVE": true,
+ "MTS_DISPATCHERS": true,
+ "MULTISET": true,
+ "NATIONAL": true,
+ "NCHAR": true,
+ "NCHAR_CS": true,
+ "NCLOB": true,
+ "NEEDED": true,
+ "NESTED": true,
+ "NETWORK": true,
+ "NEW": true,
+ "NEXT": true,
+ "NOARCHIVELOG": true,
+ "NOAUDIT": true,
+ "NOCACHE": true,
+ "NOCOMPRESS": true,
+ "NOCYCLE": true,
+ "NOFORCE": true,
+ "NOLOGGING": true,
+ "NOMAXVALUE": true,
+ "NOMINVALUE": true,
+ "NONE": true,
+ "NOORDER": true,
+ "NOOVERRIDE": true,
+ "NOPARALLEL": true,
+ "NOREVERSE": true,
+ "NORMAL": true,
+ "NOSORT": true,
+ "NOT": true,
+ "NOTHING": true,
+ "NOWAIT": true,
+ "NULL": true,
+ "NUMBER": true,
+ "NUMERIC": true,
+ "NVARCHAR2": true,
+ "OBJECT": true,
+ "OBJNO": true,
+ "OBJNO_REUSE": true,
+ "OF": true,
+ "OFF": true,
+ "OFFLINE": true,
+ "OID": true,
+ "OIDINDEX": true,
+ "OLD": true,
+ "ON": true,
+ "ONLINE": true,
+ "ONLY": true,
+ "OPCODE": true,
+ "OPEN": true,
+ "OPTIMAL": true,
+ "OPTIMIZER_GOAL": true,
+ "OPTION": true,
+ "OR": true,
+ "ORDER": true,
+ "ORGANIZATION": true,
+ "OSLABEL": true,
+ "OVERFLOW": true,
+ "OWN": true,
+ "PACKAGE": true,
+ "PARALLEL": true,
+ "PARTITION": true,
+ "PASSWORD": true,
+ "PASSWORD_GRACE_TIME": true,
+ "PASSWORD_LIFE_TIME": true,
+ "PASSWORD_LOCK_TIME": true,
+ "PASSWORD_REUSE_MAX": true,
+ "PASSWORD_REUSE_TIME": true,
+ "PASSWORD_VERIFY_FUNCTION": true,
+ "PCTFREE": true,
+ "PCTINCREASE": true,
+ "PCTTHRESHOLD": true,
+ "PCTUSED": true,
+ "PCTVERSION": true,
+ "PERCENT": true,
+ "PERMANENT": true,
+ "PLAN": true,
+ "PLSQL_DEBUG": true,
+ "POST_TRANSACTION": true,
+ "PRECISION": true,
+ "PRESERVE": true,
+ "PRIMARY": true,
+ "PRIOR": true,
+ "PRIVATE": true,
+ "PRIVATE_SGA": true,
+ "PRIVILEGE": true,
+ "PRIVILEGES": true,
+ "PROCEDURE": true,
+ "PROFILE": true,
+ "PUBLIC": true,
+ "PURGE": true,
+ "QUEUE": true,
+ "QUOTA": true,
+ "RANGE": true,
+ "RAW": true,
+ "RBA": true,
+ "READ": true,
+ "READUP": true,
+ "REAL": true,
+ "REBUILD": true,
+ "RECOVER": true,
+ "RECOVERABLE": true,
+ "RECOVERY": true,
+ "REF": true,
+ "REFERENCES": true,
+ "REFERENCING": true,
+ "REFRESH": true,
+ "RENAME": true,
+ "REPLACE": true,
+ "RESET": true,
+ "RESETLOGS": true,
+ "RESIZE": true,
+ "RESOURCE": true,
+ "RESTRICTED": true,
+ "RETURN": true,
+ "RETURNING": true,
+ "REUSE": true,
+ "REVERSE": true,
+ "REVOKE": true,
+ "ROLE": true,
+ "ROLES": true,
+ "ROLLBACK": true,
+ "ROW": true,
+ "ROWID": true,
+ "ROWNUM": true,
+ "ROWS": true,
+ "RULE": true,
+ "SAMPLE": true,
+ "SAVEPOINT": true,
+ "SB4": true,
+ "SCAN_INSTANCES": true,
+ "SCHEMA": true,
+ "SCN": true,
+ "SCOPE": true,
+ "SD_ALL": true,
+ "SD_INHIBIT": true,
+ "SD_SHOW": true,
+ "SEGMENT": true,
+ "SEG_BLOCK": true,
+ "SEG_FILE": true,
+ "SELECT": true,
+ "SEQUENCE": true,
+ "SERIALIZABLE": true,
+ "SESSION": true,
+ "SESSION_CACHED_CURSORS": true,
+ "SESSIONS_PER_USER": true,
+ "SET": true,
+ "SHARE": true,
+ "SHARED": true,
+ "SHARED_POOL": true,
+ "SHRINK": true,
+ "SIZE": true,
+ "SKIP": true,
+ "SKIP_UNUSABLE_INDEXES": true,
+ "SMALLINT": true,
+ "SNAPSHOT": true,
+ "SOME": true,
+ "SORT": true,
+ "SPECIFICATION": true,
+ "SPLIT": true,
+ "SQL_TRACE": true,
+ "STANDBY": true,
+ "START": true,
+ "STATEMENT_ID": true,
+ "STATISTICS": true,
+ "STOP": true,
+ "STORAGE": true,
+ "STORE": true,
+ "STRUCTURE": true,
+ "SUCCESSFUL": true,
+ "SWITCH": true,
+ "SYS_OP_ENFORCE_NOT_NULL$": true,
+ "SYS_OP_NTCIMG$": true,
+ "SYNONYM": true,
+ "SYSDATE": true,
+ "SYSDBA": true,
+ "SYSOPER": true,
+ "SYSTEM": true,
+ "TABLE": true,
+ "TABLES": true,
+ "TABLESPACE": true,
+ "TABLESPACE_NO": true,
+ "TABNO": true,
+ "TEMPORARY": true,
+ "THAN": true,
+ "THE": true,
+ "THEN": true,
+ "THREAD": true,
+ "TIMESTAMP": true,
+ "TIME": true,
+ "TO": true,
+ "TOPLEVEL": true,
+ "TRACE": true,
+ "TRACING": true,
+ "TRANSACTION": true,
+ "TRANSITIONAL": true,
+ "TRIGGER": true,
+ "TRIGGERS": true,
+ "TRUE": true,
+ "TRUNCATE": true,
+ "TX": true,
+ "TYPE": true,
+ "UB2": true,
+ "UBA": true,
+ "UID": true,
+ "UNARCHIVED": true,
+ "UNDO": true,
+ "UNION": true,
+ "UNIQUE": true,
+ "UNLIMITED": true,
+ "UNLOCK": true,
+ "UNRECOVERABLE": true,
+ "UNTIL": true,
+ "UNUSABLE": true,
+ "UNUSED": true,
+ "UPDATABLE": true,
+ "UPDATE": true,
+ "USAGE": true,
+ "USE": true,
+ "USER": true,
+ "USING": true,
+ "VALIDATE": true,
+ "VALIDATION": true,
+ "VALUE": true,
+ "VALUES": true,
+ "VARCHAR": true,
+ "VARCHAR2": true,
+ "VARYING": true,
+ "VIEW": true,
+ "WHEN": true,
+ "WHENEVER": true,
+ "WHERE": true,
+ "WITH": true,
+ "WITHOUT": true,
+ "WORK": true,
+ "WRITE": true,
+ "WRITEDOWN": true,
+ "WRITEUP": true,
+ "XID": true,
+ "YEAR": true,
+ "ZONE": true,
+ }
+)
+
+type oracle struct {
+ core.Base
+}
+
+func (db *oracle) Init(d *core.DB, uri *core.Uri, drivername, dataSourceName string) error {
+ return db.Base.Init(d, db, uri, drivername, dataSourceName)
+}
+
+func (db *oracle) SqlType(c *core.Column) string {
+ var res string
+ switch t := c.SQLType.Name; t {
+ case core.Bit, core.TinyInt, core.SmallInt, core.MediumInt, core.Int, core.Integer, core.BigInt, core.Bool, core.Serial, core.BigSerial:
+ res = "NUMBER"
+ case core.Binary, core.VarBinary, core.Blob, core.TinyBlob, core.MediumBlob, core.LongBlob, core.Bytea:
+ return core.Blob
+ case core.Time, core.DateTime, core.TimeStamp:
+ res = core.TimeStamp
+ case core.TimeStampz:
+ res = "TIMESTAMP WITH TIME ZONE"
+ case core.Float, core.Double, core.Numeric, core.Decimal:
+ res = "NUMBER"
+ case core.Text, core.MediumText, core.LongText, core.Json:
+ res = "CLOB"
+ case core.Char, core.Varchar, core.TinyText:
+ res = "VARCHAR2"
+ default:
+ res = t
+ }
+
+ hasLen1 := (c.Length > 0)
+ hasLen2 := (c.Length2 > 0)
+
+ if hasLen2 {
+ res += "(" + strconv.Itoa(c.Length) + "," + strconv.Itoa(c.Length2) + ")"
+ } else if hasLen1 {
+ res += "(" + strconv.Itoa(c.Length) + ")"
+ }
+ return res
+}
+
+func (db *oracle) AutoIncrStr() string {
+ return "AUTO_INCREMENT"
+}
+
+func (db *oracle) SupportInsertMany() bool {
+ return true
+}
+
+func (db *oracle) IsReserved(name string) bool {
+ _, ok := oracleReservedWords[name]
+ return ok
+}
+
+func (db *oracle) Quote(name string) string {
+ return "[" + name + "]"
+}
+
+func (db *oracle) SupportEngine() bool {
+ return false
+}
+
+func (db *oracle) SupportCharset() bool {
+ return false
+}
+
+func (db *oracle) SupportDropIfExists() bool {
+ return false
+}
+
+func (db *oracle) IndexOnTable() bool {
+ return false
+}
+
+func (db *oracle) DropTableSql(tableName string) string {
+ return fmt.Sprintf("DROP TABLE `%s`", tableName)
+}
+
+// CreateTableSql builds an Oracle CREATE TABLE statement. All columns are
+// emitted without inline PK markers; any primary key (single or composite)
+// is declared via a trailing PRIMARY KEY clause.
+func (db *oracle) CreateTableSql(table *core.Table, tableName, storeEngine, charset string) string {
+	var sql string
+	sql = "CREATE TABLE "
+	if tableName == "" {
+		tableName = table.Name
+	}
+
+	sql += db.Quote(tableName) + " ("
+
+	pkList := table.PrimaryKeys
+
+	for _, colName := range table.ColumnsSeq() {
+		col := table.GetColumn(colName)
+		/*if col.IsPrimaryKey && len(pkList) == 1 {
+			sql += col.String(b.dialect)
+		} else {*/
+		sql += col.StringNoPk(db)
+		// }
+		sql = strings.TrimSpace(sql)
+		sql += ", "
+	}
+
+	if len(pkList) > 0 {
+		sql += "PRIMARY KEY ( "
+		sql += db.Quote(strings.Join(pkList, db.Quote(",")))
+		sql += " ), "
+	}
+
+	// Trim the trailing ", " and close the column list.
+	sql = sql[:len(sql)-2] + ")"
+	// SupportEngine/SupportCharset both return false for Oracle, so the
+	// branches below are effectively dead; kept as written upstream.
+	if db.SupportEngine() && storeEngine != "" {
+		sql += " ENGINE=" + storeEngine
+	}
+	if db.SupportCharset() {
+		if len(charset) == 0 {
+			charset = db.URI().Charset
+		}
+		if len(charset) > 0 {
+			sql += " DEFAULT CHARSET " + charset
+		}
+	}
+	return sql
+}
+
+func (db *oracle) IndexCheckSql(tableName, idxName string) (string, []interface{}) {
+ args := []interface{}{tableName, idxName}
+ return `SELECT INDEX_NAME FROM USER_INDEXES ` +
+ `WHERE TABLE_NAME = :1 AND INDEX_NAME = :2`, args
+}
+
+func (db *oracle) TableCheckSql(tableName string) (string, []interface{}) {
+ args := []interface{}{tableName}
+ return `SELECT table_name FROM user_tables WHERE table_name = :1`, args
+}
+
+// MustDropTable drops tableName if it exists: it first runs the
+// TableCheckSql existence query and returns nil without error when the
+// table is absent, otherwise issues a DROP TABLE.
+func (db *oracle) MustDropTable(tableName string) error {
+	sql, args := db.TableCheckSql(tableName)
+	db.LogSQL(sql, args)
+
+	rows, err := db.DB().Query(sql, args...)
+	if err != nil {
+		return err
+	}
+	defer rows.Close()
+
+	// No row means the table does not exist; nothing to drop.
+	if !rows.Next() {
+		return nil
+	}
+
+	// NOTE(review): the table name is double-quoted (case-sensitive
+	// identifier in Oracle) and interpolated directly; it is assumed to
+	// come from trusted mapping metadata.
+	sql = "Drop Table \"" + tableName + "\""
+	db.LogSQL(sql, args)
+
+	_, err = db.DB().Exec(sql)
+	return err
+}
+
+/*func (db *oracle) ColumnCheckSql(tableName, colName string) (string, []interface{}) {
+ args := []interface{}{strings.ToUpper(tableName), strings.ToUpper(colName)}
+ return "SELECT column_name FROM USER_TAB_COLUMNS WHERE table_name = ?" +
+ " AND column_name = ?", args
+}*/
+
+func (db *oracle) IsColumnExist(tableName, colName string) (bool, error) {
+ args := []interface{}{tableName, colName}
+ query := "SELECT column_name FROM USER_TAB_COLUMNS WHERE table_name = :1" +
+ " AND column_name = :2"
+ db.LogSQL(query, args)
+
+ rows, err := db.DB().Query(query, args...)
+ if err != nil {
+ return false, err
+ }
+ defer rows.Close()
+
+ if rows.Next() {
+ return true, nil
+ }
+ return false, nil
+}
+
+// GetColumns reads column metadata for tableName from USER_TAB_COLUMNS and
+// maps Oracle data types back onto core.SQLType values, returning the
+// column order and a name-keyed column map.
+func (db *oracle) GetColumns(tableName string) ([]string, map[string]*core.Column, error) {
+	args := []interface{}{tableName}
+	s := "SELECT column_name,data_default,data_type,data_length,data_precision,data_scale," +
+		"nullable FROM USER_TAB_COLUMNS WHERE table_name = :1"
+	db.LogSQL(s, args)
+
+	rows, err := db.DB().Query(s, args...)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer rows.Close()
+
+	cols := make(map[string]*core.Column)
+	colSeq := make([]string, 0)
+	for rows.Next() {
+		col := new(core.Column)
+		col.Indexes = make(map[string]int)
+
+		// All string columns are scanned as *string because they can be
+		// NULL in USER_TAB_COLUMNS.
+		var colName, colDefault, nullable, dataType, dataPrecision, dataScale *string
+		var dataLen int
+
+		err = rows.Scan(&colName, &colDefault, &dataType, &dataLen, &dataPrecision,
+			&dataScale, &nullable)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		// NOTE(review): colName, dataType and nullable are dereferenced
+		// unconditionally below — assumed non-NULL for every row of
+		// USER_TAB_COLUMNS; a NULL would panic. Kept as upstream.
+		col.Name = strings.Trim(*colName, `" `)
+		if colDefault != nil {
+			col.Default = *colDefault
+			col.DefaultIsEmpty = false
+		}
+
+		if *nullable == "Y" {
+			col.Nullable = true
+		} else {
+			col.Nullable = false
+		}
+
+		var ignore bool
+
+		// data_type may carry lengths inline, e.g. "TIMESTAMP(6)" or
+		// "NUMBER(10,2)"; split them off the base type name.
+		var dt string
+		var len1, len2 int
+		dts := strings.Split(*dataType, "(")
+		dt = dts[0]
+		if len(dts) > 1 {
+			lens := strings.Split(dts[1][:len(dts[1])-1], ",")
+			if len(lens) > 1 {
+				len1, _ = strconv.Atoi(lens[0])
+				len2, _ = strconv.Atoi(lens[1])
+			} else {
+				len1, _ = strconv.Atoi(lens[0])
+			}
+		}
+
+		switch dt {
+		case "VARCHAR2":
+			col.SQLType = core.SQLType{Name: core.Varchar, DefaultLength: len1, DefaultLength2: len2}
+		case "NVARCHAR2":
+			col.SQLType = core.SQLType{Name: core.NVarchar, DefaultLength: len1, DefaultLength2: len2}
+		case "TIMESTAMP WITH TIME ZONE":
+			col.SQLType = core.SQLType{Name: core.TimeStampz, DefaultLength: 0, DefaultLength2: 0}
+		case "NUMBER":
+			col.SQLType = core.SQLType{Name: core.Double, DefaultLength: len1, DefaultLength2: len2}
+		case "LONG", "LONG RAW":
+			col.SQLType = core.SQLType{Name: core.Text, DefaultLength: 0, DefaultLength2: 0}
+		case "RAW":
+			col.SQLType = core.SQLType{Name: core.Binary, DefaultLength: 0, DefaultLength2: 0}
+		case "ROWID":
+			col.SQLType = core.SQLType{Name: core.Varchar, DefaultLength: 18, DefaultLength2: 0}
+		case "AQ$_SUBSCRIBERS":
+			// Internal queueing type: skip this column entirely.
+			ignore = true
+		default:
+			col.SQLType = core.SQLType{Name: strings.ToUpper(dt), DefaultLength: len1, DefaultLength2: len2}
+		}
+
+		if ignore {
+			continue
+		}
+
+		if _, ok := core.SqlTypes[col.SQLType.Name]; !ok {
+			return nil, nil, fmt.Errorf("Unknown colType %v %v", *dataType, col.SQLType)
+		}
+
+		col.Length = dataLen
+
+		// Text and time defaults must be single-quoted for re-emission.
+		if col.SQLType.IsText() || col.SQLType.IsTime() {
+			if !col.DefaultIsEmpty {
+				col.Default = "'" + col.Default + "'"
+			}
+		}
+		cols[col.Name] = col
+		colSeq = append(colSeq, col.Name)
+	}
+
+	return colSeq, cols, nil
+}
+
+func (db *oracle) GetTables() ([]*core.Table, error) {
+ args := []interface{}{}
+ s := "SELECT table_name FROM user_tables"
+ db.LogSQL(s, args)
+
+ rows, err := db.DB().Query(s, args...)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ tables := make([]*core.Table, 0)
+ for rows.Next() {
+ table := core.NewEmptyTable()
+ err = rows.Scan(&table.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ tables = append(tables, table)
+ }
+ return tables, nil
+}
+
+func (db *oracle) GetIndexes(tableName string) (map[string]*core.Index, error) {
+ args := []interface{}{tableName}
+ s := "SELECT t.column_name,i.uniqueness,i.index_name FROM user_ind_columns t,user_indexes i " +
+ "WHERE t.index_name = i.index_name and t.table_name = i.table_name and t.table_name =:1"
+ db.LogSQL(s, args)
+
+ rows, err := db.DB().Query(s, args...)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ indexes := make(map[string]*core.Index, 0)
+ for rows.Next() {
+ var indexType int
+ var indexName, colName, uniqueness string
+
+ err = rows.Scan(&colName, &uniqueness, &indexName)
+ if err != nil {
+ return nil, err
+ }
+
+ indexName = strings.Trim(indexName, `" `)
+
+ var isRegular bool
+ if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) {
+ indexName = indexName[5+len(tableName):]
+ isRegular = true
+ }
+
+ if uniqueness == "UNIQUE" {
+ indexType = core.UniqueType
+ } else {
+ indexType = core.IndexType
+ }
+
+ var index *core.Index
+ var ok bool
+ if index, ok = indexes[indexName]; !ok {
+ index = new(core.Index)
+ index.Type = indexType
+ index.Name = indexName
+ index.IsRegular = isRegular
+ indexes[indexName] = index
+ }
+ index.AddColumn(colName)
+ }
+ return indexes, nil
+}
+
+func (db *oracle) Filters() []core.Filter {
+ return []core.Filter{&core.QuoteFilter{}, &core.SeqFilter{Prefix: ":", Start: 1}, &core.IdFilter{}}
+}
+
+type goracleDriver struct {
+}
+
+func (cfg *goracleDriver) Parse(driverName, dataSourceName string) (*core.Uri, error) {
+ db := &core.Uri{DbType: core.ORACLE}
+ dsnPattern := regexp.MustCompile(
+ `^(?:(?P.*?)(?::(?P.*))?@)?` + // [user[:password]@]
+ `(?:(?P[^\(]*)(?:\((?P[^\)]*)\))?)?` + // [net[(addr)]]
+ `\/(?P.*?)` + // /dbname
+ `(?:\?(?P[^\?]*))?$`) // [?param1=value1¶mN=valueN]
+ matches := dsnPattern.FindStringSubmatch(dataSourceName)
+ // tlsConfigRegister := make(map[string]*tls.Config)
+ names := dsnPattern.SubexpNames()
+
+ for i, match := range matches {
+ switch names[i] {
+ case "dbname":
+ db.DbName = match
+ }
+ }
+ if db.DbName == "" {
+ return nil, errors.New("dbname is empty")
+ }
+ return db, nil
+}
+
+type oci8Driver struct {
+}
+
+// dataSourceName=user/password@ipv4:port/dbname
+// dataSourceName=user/password@[ipv6]:port/dbname
+func (p *oci8Driver) Parse(driverName, dataSourceName string) (*core.Uri, error) {
+ db := &core.Uri{DbType: core.ORACLE}
+ dsnPattern := regexp.MustCompile(
+ `^(?P.*)\/(?P.*)@` + // user:password@
+ `(?P.*)` + // ip:port
+ `\/(?P.*)`) // dbname
+ matches := dsnPattern.FindStringSubmatch(dataSourceName)
+ names := dsnPattern.SubexpNames()
+ for i, match := range matches {
+ switch names[i] {
+ case "dbname":
+ db.DbName = match
+ }
+ }
+ if db.DbName == "" {
+ return nil, errors.New("dbname is empty")
+ }
+ return db, nil
+}
diff --git a/vendor/github.com/go-xorm/xorm/dialect_postgres.go b/vendor/github.com/go-xorm/xorm/dialect_postgres.go
new file mode 100644
index 0000000..e1c377a
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/dialect_postgres.go
@@ -0,0 +1,1247 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "errors"
+ "fmt"
+ "net/url"
+ "strconv"
+ "strings"
+
+ "xorm.io/core"
+)
+
+// from http://www.postgresql.org/docs/current/static/sql-keywords-appendix.html
+var (
+ postgresReservedWords = map[string]bool{
+ "A": true,
+ "ABORT": true,
+ "ABS": true,
+ "ABSENT": true,
+ "ABSOLUTE": true,
+ "ACCESS": true,
+ "ACCORDING": true,
+ "ACTION": true,
+ "ADA": true,
+ "ADD": true,
+ "ADMIN": true,
+ "AFTER": true,
+ "AGGREGATE": true,
+ "ALL": true,
+ "ALLOCATE": true,
+ "ALSO": true,
+ "ALTER": true,
+ "ALWAYS": true,
+ "ANALYSE": true,
+ "ANALYZE": true,
+ "AND": true,
+ "ANY": true,
+ "ARE": true,
+ "ARRAY": true,
+ "ARRAY_AGG": true,
+ "ARRAY_MAX_CARDINALITY": true,
+ "AS": true,
+ "ASC": true,
+ "ASENSITIVE": true,
+ "ASSERTION": true,
+ "ASSIGNMENT": true,
+ "ASYMMETRIC": true,
+ "AT": true,
+ "ATOMIC": true,
+ "ATTRIBUTE": true,
+ "ATTRIBUTES": true,
+ "AUTHORIZATION": true,
+ "AVG": true,
+ "BACKWARD": true,
+ "BASE64": true,
+ "BEFORE": true,
+ "BEGIN": true,
+ "BEGIN_FRAME": true,
+ "BEGIN_PARTITION": true,
+ "BERNOULLI": true,
+ "BETWEEN": true,
+ "BIGINT": true,
+ "BINARY": true,
+ "BIT": true,
+ "BIT_LENGTH": true,
+ "BLOB": true,
+ "BLOCKED": true,
+ "BOM": true,
+ "BOOLEAN": true,
+ "BOTH": true,
+ "BREADTH": true,
+ "BY": true,
+ "C": true,
+ "CACHE": true,
+ "CALL": true,
+ "CALLED": true,
+ "CARDINALITY": true,
+ "CASCADE": true,
+ "CASCADED": true,
+ "CASE": true,
+ "CAST": true,
+ "CATALOG": true,
+ "CATALOG_NAME": true,
+ "CEIL": true,
+ "CEILING": true,
+ "CHAIN": true,
+ "CHAR": true,
+ "CHARACTER": true,
+ "CHARACTERISTICS": true,
+ "CHARACTERS": true,
+ "CHARACTER_LENGTH": true,
+ "CHARACTER_SET_CATALOG": true,
+ "CHARACTER_SET_NAME": true,
+ "CHARACTER_SET_SCHEMA": true,
+ "CHAR_LENGTH": true,
+ "CHECK": true,
+ "CHECKPOINT": true,
+ "CLASS": true,
+ "CLASS_ORIGIN": true,
+ "CLOB": true,
+ "CLOSE": true,
+ "CLUSTER": true,
+ "COALESCE": true,
+ "COBOL": true,
+ "COLLATE": true,
+ "COLLATION": true,
+ "COLLATION_CATALOG": true,
+ "COLLATION_NAME": true,
+ "COLLATION_SCHEMA": true,
+ "COLLECT": true,
+ "COLUMN": true,
+ "COLUMNS": true,
+ "COLUMN_NAME": true,
+ "COMMAND_FUNCTION": true,
+ "COMMAND_FUNCTION_CODE": true,
+ "COMMENT": true,
+ "COMMENTS": true,
+ "COMMIT": true,
+ "COMMITTED": true,
+ "CONCURRENTLY": true,
+ "CONDITION": true,
+ "CONDITION_NUMBER": true,
+ "CONFIGURATION": true,
+ "CONNECT": true,
+ "CONNECTION": true,
+ "CONNECTION_NAME": true,
+ "CONSTRAINT": true,
+ "CONSTRAINTS": true,
+ "CONSTRAINT_CATALOG": true,
+ "CONSTRAINT_NAME": true,
+ "CONSTRAINT_SCHEMA": true,
+ "CONSTRUCTOR": true,
+ "CONTAINS": true,
+ "CONTENT": true,
+ "CONTINUE": true,
+ "CONTROL": true,
+ "CONVERSION": true,
+ "CONVERT": true,
+ "COPY": true,
+ "CORR": true,
+ "CORRESPONDING": true,
+ "COST": true,
+ "COUNT": true,
+ "COVAR_POP": true,
+ "COVAR_SAMP": true,
+ "CREATE": true,
+ "CROSS": true,
+ "CSV": true,
+ "CUBE": true,
+ "CUME_DIST": true,
+ "CURRENT": true,
+ "CURRENT_CATALOG": true,
+ "CURRENT_DATE": true,
+ "CURRENT_DEFAULT_TRANSFORM_GROUP": true,
+ "CURRENT_PATH": true,
+ "CURRENT_ROLE": true,
+ "CURRENT_ROW": true,
+ "CURRENT_SCHEMA": true,
+ "CURRENT_TIME": true,
+ "CURRENT_TIMESTAMP": true,
+ "CURRENT_TRANSFORM_GROUP_FOR_TYPE": true,
+ "CURRENT_USER": true,
+ "CURSOR": true,
+ "CURSOR_NAME": true,
+ "CYCLE": true,
+ "DATA": true,
+ "DATABASE": true,
+ "DATALINK": true,
+ "DATE": true,
+ "DATETIME_INTERVAL_CODE": true,
+ "DATETIME_INTERVAL_PRECISION": true,
+ "DAY": true,
+ "DB": true,
+ "DEALLOCATE": true,
+ "DEC": true,
+ "DECIMAL": true,
+ "DECLARE": true,
+ "DEFAULT": true,
+ "DEFAULTS": true,
+ "DEFERRABLE": true,
+ "DEFERRED": true,
+ "DEFINED": true,
+ "DEFINER": true,
+ "DEGREE": true,
+ "DELETE": true,
+ "DELIMITER": true,
+ "DELIMITERS": true,
+ "DENSE_RANK": true,
+ "DEPTH": true,
+ "DEREF": true,
+ "DERIVED": true,
+ "DESC": true,
+ "DESCRIBE": true,
+ "DESCRIPTOR": true,
+ "DETERMINISTIC": true,
+ "DIAGNOSTICS": true,
+ "DICTIONARY": true,
+ "DISABLE": true,
+ "DISCARD": true,
+ "DISCONNECT": true,
+ "DISPATCH": true,
+ "DISTINCT": true,
+ "DLNEWCOPY": true,
+ "DLPREVIOUSCOPY": true,
+ "DLURLCOMPLETE": true,
+ "DLURLCOMPLETEONLY": true,
+ "DLURLCOMPLETEWRITE": true,
+ "DLURLPATH": true,
+ "DLURLPATHONLY": true,
+ "DLURLPATHWRITE": true,
+ "DLURLSCHEME": true,
+ "DLURLSERVER": true,
+ "DLVALUE": true,
+ "DO": true,
+ "DOCUMENT": true,
+ "DOMAIN": true,
+ "DOUBLE": true,
+ "DROP": true,
+ "DYNAMIC": true,
+ "DYNAMIC_FUNCTION": true,
+ "DYNAMIC_FUNCTION_CODE": true,
+ "EACH": true,
+ "ELEMENT": true,
+ "ELSE": true,
+ "EMPTY": true,
+ "ENABLE": true,
+ "ENCODING": true,
+ "ENCRYPTED": true,
+ "END": true,
+ "END-EXEC": true,
+ "END_FRAME": true,
+ "END_PARTITION": true,
+ "ENFORCED": true,
+ "ENUM": true,
+ "EQUALS": true,
+ "ESCAPE": true,
+ "EVENT": true,
+ "EVERY": true,
+ "EXCEPT": true,
+ "EXCEPTION": true,
+ "EXCLUDE": true,
+ "EXCLUDING": true,
+ "EXCLUSIVE": true,
+ "EXEC": true,
+ "EXECUTE": true,
+ "EXISTS": true,
+ "EXP": true,
+ "EXPLAIN": true,
+ "EXPRESSION": true,
+ "EXTENSION": true,
+ "EXTERNAL": true,
+ "EXTRACT": true,
+ "FALSE": true,
+ "FAMILY": true,
+ "FETCH": true,
+ "FILE": true,
+ "FILTER": true,
+ "FINAL": true,
+ "FIRST": true,
+ "FIRST_VALUE": true,
+ "FLAG": true,
+ "FLOAT": true,
+ "FLOOR": true,
+ "FOLLOWING": true,
+ "FOR": true,
+ "FORCE": true,
+ "FOREIGN": true,
+ "FORTRAN": true,
+ "FORWARD": true,
+ "FOUND": true,
+ "FRAME_ROW": true,
+ "FREE": true,
+ "FREEZE": true,
+ "FROM": true,
+ "FS": true,
+ "FULL": true,
+ "FUNCTION": true,
+ "FUNCTIONS": true,
+ "FUSION": true,
+ "G": true,
+ "GENERAL": true,
+ "GENERATED": true,
+ "GET": true,
+ "GLOBAL": true,
+ "GO": true,
+ "GOTO": true,
+ "GRANT": true,
+ "GRANTED": true,
+ "GREATEST": true,
+ "GROUP": true,
+ "GROUPING": true,
+ "GROUPS": true,
+ "HANDLER": true,
+ "HAVING": true,
+ "HEADER": true,
+ "HEX": true,
+ "HIERARCHY": true,
+ "HOLD": true,
+ "HOUR": true,
+ "ID": true,
+ "IDENTITY": true,
+ "IF": true,
+ "IGNORE": true,
+ "ILIKE": true,
+ "IMMEDIATE": true,
+ "IMMEDIATELY": true,
+ "IMMUTABLE": true,
+ "IMPLEMENTATION": true,
+ "IMPLICIT": true,
+ "IMPORT": true,
+ "IN": true,
+ "INCLUDING": true,
+ "INCREMENT": true,
+ "INDENT": true,
+ "INDEX": true,
+ "INDEXES": true,
+ "INDICATOR": true,
+ "INHERIT": true,
+ "INHERITS": true,
+ "INITIALLY": true,
+ "INLINE": true,
+ "INNER": true,
+ "INOUT": true,
+ "INPUT": true,
+ "INSENSITIVE": true,
+ "INSERT": true,
+ "INSTANCE": true,
+ "INSTANTIABLE": true,
+ "INSTEAD": true,
+ "INT": true,
+ "INTEGER": true,
+ "INTEGRITY": true,
+ "INTERSECT": true,
+ "INTERSECTION": true,
+ "INTERVAL": true,
+ "INTO": true,
+ "INVOKER": true,
+ "IS": true,
+ "ISNULL": true,
+ "ISOLATION": true,
+ "JOIN": true,
+ "K": true,
+ "KEY": true,
+ "KEY_MEMBER": true,
+ "KEY_TYPE": true,
+ "LABEL": true,
+ "LAG": true,
+ "LANGUAGE": true,
+ "LARGE": true,
+ "LAST": true,
+ "LAST_VALUE": true,
+ "LATERAL": true,
+ "LC_COLLATE": true,
+ "LC_CTYPE": true,
+ "LEAD": true,
+ "LEADING": true,
+ "LEAKPROOF": true,
+ "LEAST": true,
+ "LEFT": true,
+ "LENGTH": true,
+ "LEVEL": true,
+ "LIBRARY": true,
+ "LIKE": true,
+ "LIKE_REGEX": true,
+ "LIMIT": true,
+ "LINK": true,
+ "LISTEN": true,
+ "LN": true,
+ "LOAD": true,
+ "LOCAL": true,
+ "LOCALTIME": true,
+ "LOCALTIMESTAMP": true,
+ "LOCATION": true,
+ "LOCATOR": true,
+ "LOCK": true,
+ "LOWER": true,
+ "M": true,
+ "MAP": true,
+ "MAPPING": true,
+ "MATCH": true,
+ "MATCHED": true,
+ "MATERIALIZED": true,
+ "MAX": true,
+ "MAXVALUE": true,
+ "MAX_CARDINALITY": true,
+ "MEMBER": true,
+ "MERGE": true,
+ "MESSAGE_LENGTH": true,
+ "MESSAGE_OCTET_LENGTH": true,
+ "MESSAGE_TEXT": true,
+ "METHOD": true,
+ "MIN": true,
+ "MINUTE": true,
+ "MINVALUE": true,
+ "MOD": true,
+ "MODE": true,
+ "MODIFIES": true,
+ "MODULE": true,
+ "MONTH": true,
+ "MORE": true,
+ "MOVE": true,
+ "MULTISET": true,
+ "MUMPS": true,
+ "NAME": true,
+ "NAMES": true,
+ "NAMESPACE": true,
+ "NATIONAL": true,
+ "NATURAL": true,
+ "NCHAR": true,
+ "NCLOB": true,
+ "NESTING": true,
+ "NEW": true,
+ "NEXT": true,
+ "NFC": true,
+ "NFD": true,
+ "NFKC": true,
+ "NFKD": true,
+ "NIL": true,
+ "NO": true,
+ "NONE": true,
+ "NORMALIZE": true,
+ "NORMALIZED": true,
+ "NOT": true,
+ "NOTHING": true,
+ "NOTIFY": true,
+ "NOTNULL": true,
+ "NOWAIT": true,
+ "NTH_VALUE": true,
+ "NTILE": true,
+ "NULL": true,
+ "NULLABLE": true,
+ "NULLIF": true,
+ "NULLS": true,
+ "NUMBER": true,
+ "NUMERIC": true,
+ "OBJECT": true,
+ "OCCURRENCES_REGEX": true,
+ "OCTETS": true,
+ "OCTET_LENGTH": true,
+ "OF": true,
+ "OFF": true,
+ "OFFSET": true,
+ "OIDS": true,
+ "OLD": true,
+ "ON": true,
+ "ONLY": true,
+ "OPEN": true,
+ "OPERATOR": true,
+ "OPTION": true,
+ "OPTIONS": true,
+ "OR": true,
+ "ORDER": true,
+ "ORDERING": true,
+ "ORDINALITY": true,
+ "OTHERS": true,
+ "OUT": true,
+ "OUTER": true,
+ "OUTPUT": true,
+ "OVER": true,
+ "OVERLAPS": true,
+ "OVERLAY": true,
+ "OVERRIDING": true,
+ "OWNED": true,
+ "OWNER": true,
+ "P": true,
+ "PAD": true,
+ "PARAMETER": true,
+ "PARAMETER_MODE": true,
+ "PARAMETER_NAME": true,
+ "PARAMETER_ORDINAL_POSITION": true,
+ "PARAMETER_SPECIFIC_CATALOG": true,
+ "PARAMETER_SPECIFIC_NAME": true,
+ "PARAMETER_SPECIFIC_SCHEMA": true,
+ "PARSER": true,
+ "PARTIAL": true,
+ "PARTITION": true,
+ "PASCAL": true,
+ "PASSING": true,
+ "PASSTHROUGH": true,
+ "PASSWORD": true,
+ "PATH": true,
+ "PERCENT": true,
+ "PERCENTILE_CONT": true,
+ "PERCENTILE_DISC": true,
+ "PERCENT_RANK": true,
+ "PERIOD": true,
+ "PERMISSION": true,
+ "PLACING": true,
+ "PLANS": true,
+ "PLI": true,
+ "PORTION": true,
+ "POSITION": true,
+ "POSITION_REGEX": true,
+ "POWER": true,
+ "PRECEDES": true,
+ "PRECEDING": true,
+ "PRECISION": true,
+ "PREPARE": true,
+ "PREPARED": true,
+ "PRESERVE": true,
+ "PRIMARY": true,
+ "PRIOR": true,
+ "PRIVILEGES": true,
+ "PROCEDURAL": true,
+ "PROCEDURE": true,
+ "PROGRAM": true,
+ "PUBLIC": true,
+ "QUOTE": true,
+ "RANGE": true,
+ "RANK": true,
+ "READ": true,
+ "READS": true,
+ "REAL": true,
+ "REASSIGN": true,
+ "RECHECK": true,
+ "RECOVERY": true,
+ "RECURSIVE": true,
+ "REF": true,
+ "REFERENCES": true,
+ "REFERENCING": true,
+ "REFRESH": true,
+ "REGR_AVGX": true,
+ "REGR_AVGY": true,
+ "REGR_COUNT": true,
+ "REGR_INTERCEPT": true,
+ "REGR_R2": true,
+ "REGR_SLOPE": true,
+ "REGR_SXX": true,
+ "REGR_SXY": true,
+ "REGR_SYY": true,
+ "REINDEX": true,
+ "RELATIVE": true,
+ "RELEASE": true,
+ "RENAME": true,
+ "REPEATABLE": true,
+ "REPLACE": true,
+ "REPLICA": true,
+ "REQUIRING": true,
+ "RESET": true,
+ "RESPECT": true,
+ "RESTART": true,
+ "RESTORE": true,
+ "RESTRICT": true,
+ "RESULT": true,
+ "RETURN": true,
+ "RETURNED_CARDINALITY": true,
+ "RETURNED_LENGTH": true,
+ "RETURNED_OCTET_LENGTH": true,
+ "RETURNED_SQLSTATE": true,
+ "RETURNING": true,
+ "RETURNS": true,
+ "REVOKE": true,
+ "RIGHT": true,
+ "ROLE": true,
+ "ROLLBACK": true,
+ "ROLLUP": true,
+ "ROUTINE": true,
+ "ROUTINE_CATALOG": true,
+ "ROUTINE_NAME": true,
+ "ROUTINE_SCHEMA": true,
+ "ROW": true,
+ "ROWS": true,
+ "ROW_COUNT": true,
+ "ROW_NUMBER": true,
+ "RULE": true,
+ "SAVEPOINT": true,
+ "SCALE": true,
+ "SCHEMA": true,
+ "SCHEMA_NAME": true,
+ "SCOPE": true,
+ "SCOPE_CATALOG": true,
+ "SCOPE_NAME": true,
+ "SCOPE_SCHEMA": true,
+ "SCROLL": true,
+ "SEARCH": true,
+ "SECOND": true,
+ "SECTION": true,
+ "SECURITY": true,
+ "SELECT": true,
+ "SELECTIVE": true,
+ "SELF": true,
+ "SENSITIVE": true,
+ "SEQUENCE": true,
+ "SEQUENCES": true,
+ "SERIALIZABLE": true,
+ "SERVER": true,
+ "SERVER_NAME": true,
+ "SESSION": true,
+ "SESSION_USER": true,
+ "SET": true,
+ "SETOF": true,
+ "SETS": true,
+ "SHARE": true,
+ "SHOW": true,
+ "SIMILAR": true,
+ "SIMPLE": true,
+ "SIZE": true,
+ "SMALLINT": true,
+ "SNAPSHOT": true,
+ "SOME": true,
+ "SOURCE": true,
+ "SPACE": true,
+ "SPECIFIC": true,
+ "SPECIFICTYPE": true,
+ "SPECIFIC_NAME": true,
+ "SQL": true,
+ "SQLCODE": true,
+ "SQLERROR": true,
+ "SQLEXCEPTION": true,
+ "SQLSTATE": true,
+ "SQLWARNING": true,
+ "SQRT": true,
+ "STABLE": true,
+ "STANDALONE": true,
+ "START": true,
+ "STATE": true,
+ "STATEMENT": true,
+ "STATIC": true,
+ "STATISTICS": true,
+ "STDDEV_POP": true,
+ "STDDEV_SAMP": true,
+ "STDIN": true,
+ "STDOUT": true,
+ "STORAGE": true,
+ "STRICT": true,
+ "STRIP": true,
+ "STRUCTURE": true,
+ "STYLE": true,
+ "SUBCLASS_ORIGIN": true,
+ "SUBMULTISET": true,
+ "SUBSTRING": true,
+ "SUBSTRING_REGEX": true,
+ "SUCCEEDS": true,
+ "SUM": true,
+ "SYMMETRIC": true,
+ "SYSID": true,
+ "SYSTEM": true,
+ "SYSTEM_TIME": true,
+ "SYSTEM_USER": true,
+ "T": true,
+ "TABLE": true,
+ "TABLES": true,
+ "TABLESAMPLE": true,
+ "TABLESPACE": true,
+ "TABLE_NAME": true,
+ "TEMP": true,
+ "TEMPLATE": true,
+ "TEMPORARY": true,
+ "TEXT": true,
+ "THEN": true,
+ "TIES": true,
+ "TIME": true,
+ "TIMESTAMP": true,
+ "TIMEZONE_HOUR": true,
+ "TIMEZONE_MINUTE": true,
+ "TO": true,
+ "TOKEN": true,
+ "TOP_LEVEL_COUNT": true,
+ "TRAILING": true,
+ "TRANSACTION": true,
+ "TRANSACTIONS_COMMITTED": true,
+ "TRANSACTIONS_ROLLED_BACK": true,
+ "TRANSACTION_ACTIVE": true,
+ "TRANSFORM": true,
+ "TRANSFORMS": true,
+ "TRANSLATE": true,
+ "TRANSLATE_REGEX": true,
+ "TRANSLATION": true,
+ "TREAT": true,
+ "TRIGGER": true,
+ "TRIGGER_CATALOG": true,
+ "TRIGGER_NAME": true,
+ "TRIGGER_SCHEMA": true,
+ "TRIM": true,
+ "TRIM_ARRAY": true,
+ "TRUE": true,
+ "TRUNCATE": true,
+ "TRUSTED": true,
+ "TYPE": true,
+ "TYPES": true,
+ "UESCAPE": true,
+ "UNBOUNDED": true,
+ "UNCOMMITTED": true,
+ "UNDER": true,
+ "UNENCRYPTED": true,
+ "UNION": true,
+ "UNIQUE": true,
+ "UNKNOWN": true,
+ "UNLINK": true,
+ "UNLISTEN": true,
+ "UNLOGGED": true,
+ "UNNAMED": true,
+ "UNNEST": true,
+ "UNTIL": true,
+ "UNTYPED": true,
+ "UPDATE": true,
+ "UPPER": true,
+ "URI": true,
+ "USAGE": true,
+ "USER": true,
+ "USER_DEFINED_TYPE_CATALOG": true,
+ "USER_DEFINED_TYPE_CODE": true,
+ "USER_DEFINED_TYPE_NAME": true,
+ "USER_DEFINED_TYPE_SCHEMA": true,
+ "USING": true,
+ "VACUUM": true,
+ "VALID": true,
+ "VALIDATE": true,
+ "VALIDATOR": true,
+ "VALUE": true,
+ "VALUES": true,
+ "VALUE_OF": true,
+ "VARBINARY": true,
+ "VARCHAR": true,
+ "VARIADIC": true,
+ "VARYING": true,
+ "VAR_POP": true,
+ "VAR_SAMP": true,
+ "VERBOSE": true,
+ "VERSION": true,
+ "VERSIONING": true,
+ "VIEW": true,
+ "VOLATILE": true,
+ "WHEN": true,
+ "WHENEVER": true,
+ "WHERE": true,
+ "WHITESPACE": true,
+ "WIDTH_BUCKET": true,
+ "WINDOW": true,
+ "WITH": true,
+ "WITHIN": true,
+ "WITHOUT": true,
+ "WORK": true,
+ "WRAPPER": true,
+ "WRITE": true,
+ "XML": true,
+ "XMLAGG": true,
+ "XMLATTRIBUTES": true,
+ "XMLBINARY": true,
+ "XMLCAST": true,
+ "XMLCOMMENT": true,
+ "XMLCONCAT": true,
+ "XMLDECLARATION": true,
+ "XMLDOCUMENT": true,
+ "XMLELEMENT": true,
+ "XMLEXISTS": true,
+ "XMLFOREST": true,
+ "XMLITERATE": true,
+ "XMLNAMESPACES": true,
+ "XMLPARSE": true,
+ "XMLPI": true,
+ "XMLQUERY": true,
+ "XMLROOT": true,
+ "XMLSCHEMA": true,
+ "XMLSERIALIZE": true,
+ "XMLTABLE": true,
+ "XMLTEXT": true,
+ "XMLVALIDATE": true,
+ "YEAR": true,
+ "YES": true,
+ "ZONE": true,
+ }
+
+ // DefaultPostgresSchema default postgres schema
+ DefaultPostgresSchema = "public"
+)
+
// postgresPublicSchema is PostgreSQL's built-in default schema name.
const postgresPublicSchema = "public"
+
// postgres is the PostgreSQL dialect, built on top of core.Base.
type postgres struct {
	core.Base
}
+
+func (db *postgres) Init(d *core.DB, uri *core.Uri, drivername, dataSourceName string) error {
+ err := db.Base.Init(d, db, uri, drivername, dataSourceName)
+ if err != nil {
+ return err
+ }
+ if db.Schema == "" {
+ db.Schema = DefaultPostgresSchema
+ }
+ return nil
+}
+
// SqlType maps a column's generic SQL type onto its PostgreSQL type name.
// Cases that `return` directly deliberately skip the length suffix that is
// appended at the bottom; cases that assign `res` fall through to it.
func (db *postgres) SqlType(c *core.Column) string {
	var res string
	switch t := c.SQLType.Name; t {
	case core.TinyInt:
		res = core.SmallInt
		return res
	case core.Bit:
		res = core.Boolean
		return res
	case core.MediumInt, core.Int, core.Integer:
		// Auto-increment integers become SERIAL on PostgreSQL.
		if c.IsAutoIncrement {
			return core.Serial
		}
		return core.Integer
	case core.BigInt:
		if c.IsAutoIncrement {
			return core.BigSerial
		}
		return core.BigInt
	case core.Serial, core.BigSerial:
		// SERIAL implies NOT NULL + auto-increment; record that on the column.
		c.IsAutoIncrement = true
		c.Nullable = false
		res = t
	case core.Binary, core.VarBinary:
		return core.Bytea
	case core.DateTime:
		res = core.TimeStamp
	case core.TimeStampz:
		return "timestamp with time zone"
	case core.Float:
		res = core.Real
	case core.TinyText, core.MediumText, core.LongText:
		res = core.Text
	case core.NVarchar:
		res = core.Varchar
	case core.Uuid:
		return core.Uuid
	case core.Blob, core.TinyBlob, core.MediumBlob, core.LongBlob:
		return core.Bytea
	case core.Double:
		return "DOUBLE PRECISION"
	default:
		if c.IsAutoIncrement {
			return core.Serial
		}
		res = t
	}

	if strings.EqualFold(res, "bool") {
		// for bool, we don't need length information
		return res
	}
	hasLen1 := (c.Length > 0)
	hasLen2 := (c.Length2 > 0)

	if hasLen2 {
		res += "(" + strconv.Itoa(c.Length) + "," + strconv.Itoa(c.Length2) + ")"
	} else if hasLen1 {
		res += "(" + strconv.Itoa(c.Length) + ")"
	}
	return res
}
+
+func (db *postgres) SupportInsertMany() bool {
+ return true
+}
+
+func (db *postgres) IsReserved(name string) bool {
+ _, ok := postgresReservedWords[name]
+ return ok
+}
+
+func (db *postgres) Quote(name string) string {
+ name = strings.Replace(name, ".", `"."`, -1)
+ return "\"" + name + "\""
+}
+
+func (db *postgres) AutoIncrStr() string {
+ return ""
+}
+
+func (db *postgres) SupportEngine() bool {
+ return false
+}
+
+func (db *postgres) SupportCharset() bool {
+ return false
+}
+
+func (db *postgres) IndexOnTable() bool {
+ return false
+}
+
+func (db *postgres) IndexCheckSql(tableName, idxName string) (string, []interface{}) {
+ if len(db.Schema) == 0 {
+ args := []interface{}{tableName, idxName}
+ return `SELECT indexname FROM pg_indexes WHERE tablename = ? AND indexname = ?`, args
+ }
+
+ args := []interface{}{db.Schema, tableName, idxName}
+ return `SELECT indexname FROM pg_indexes ` +
+ `WHERE schemaname = ? AND tablename = ? AND indexname = ?`, args
+}
+
+func (db *postgres) TableCheckSql(tableName string) (string, []interface{}) {
+ if len(db.Schema) == 0 {
+ args := []interface{}{tableName}
+ return `SELECT tablename FROM pg_tables WHERE tablename = ?`, args
+ }
+
+ args := []interface{}{db.Schema, tableName}
+ return `SELECT tablename FROM pg_tables WHERE schemaname = ? AND tablename = ?`, args
+}
+
+func (db *postgres) ModifyColumnSql(tableName string, col *core.Column) string {
+ if len(db.Schema) == 0 {
+ return fmt.Sprintf("alter table %s ALTER COLUMN %s TYPE %s",
+ tableName, col.Name, db.SqlType(col))
+ }
+ return fmt.Sprintf("alter table %s.%s ALTER COLUMN %s TYPE %s",
+ db.Schema, tableName, col.Name, db.SqlType(col))
+}
+
+func (db *postgres) DropIndexSql(tableName string, index *core.Index) string {
+ quote := db.Quote
+ idxName := index.Name
+
+ tableName = strings.Replace(tableName, `"`, "", -1)
+ tableName = strings.Replace(tableName, `.`, "_", -1)
+
+ if !strings.HasPrefix(idxName, "UQE_") &&
+ !strings.HasPrefix(idxName, "IDX_") {
+ if index.Type == core.UniqueType {
+ idxName = fmt.Sprintf("UQE_%v_%v", tableName, index.Name)
+ } else {
+ idxName = fmt.Sprintf("IDX_%v_%v", tableName, index.Name)
+ }
+ }
+ if db.Uri.Schema != "" {
+ idxName = db.Uri.Schema + "." + idxName
+ }
+ return fmt.Sprintf("DROP INDEX %v", quote(idxName))
+}
+
+func (db *postgres) IsColumnExist(tableName, colName string) (bool, error) {
+ args := []interface{}{db.Schema, tableName, colName}
+ query := "SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = $1 AND table_name = $2" +
+ " AND column_name = $3"
+ if len(db.Schema) == 0 {
+ args = []interface{}{tableName, colName}
+ query = "SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = $1" +
+ " AND column_name = $2"
+ }
+ db.LogSQL(query, args)
+
+ rows, err := db.DB().Query(query, args...)
+ if err != nil {
+ return false, err
+ }
+ defer rows.Close()
+
+ return rows.Next(), nil
+}
+
// GetColumns queries the pg_* catalogs and returns the table's column names
// in attribute order together with a map from column name to *core.Column.
func (db *postgres) GetColumns(tableName string) ([]string, map[string]*core.Column, error) {
	args := []interface{}{tableName}
	// The %s placeholder is filled below with an optional schema filter.
	s := `SELECT column_name, column_default, is_nullable, data_type, character_maximum_length, numeric_precision, numeric_precision_radix ,
    CASE WHEN p.contype = 'p' THEN true ELSE false END AS primarykey,
    CASE WHEN p.contype = 'u' THEN true ELSE false END AS uniquekey
FROM pg_attribute f
    JOIN pg_class c ON c.oid = f.attrelid JOIN pg_type t ON t.oid = f.atttypid
    LEFT JOIN pg_attrdef d ON d.adrelid = c.oid AND d.adnum = f.attnum
    LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
    LEFT JOIN pg_constraint p ON p.conrelid = c.oid AND f.attnum = ANY (p.conkey)
    LEFT JOIN pg_class AS g ON p.confrelid = g.oid
    LEFT JOIN INFORMATION_SCHEMA.COLUMNS s ON s.column_name=f.attname AND c.relname=s.table_name
WHERE c.relkind = 'r'::char AND c.relname = $1%s AND f.attnum > 0 ORDER BY f.attnum;`

	var f string
	if len(db.Schema) != 0 {
		args = append(args, db.Schema)
		f = " AND s.table_schema = $2"
	}
	s = fmt.Sprintf(s, f)

	db.LogSQL(s, args)

	rows, err := db.DB().Query(s, args...)
	if err != nil {
		return nil, nil, err
	}
	defer rows.Close()

	cols := make(map[string]*core.Column)
	colSeq := make([]string, 0)

	for rows.Next() {
		col := new(core.Column)
		col.Indexes = make(map[string]int)

		// Nullable catalog columns are scanned into pointers; numPrecision
		// and numRadix are selected but not used below.
		var colName, isNullable, dataType string
		var maxLenStr, colDefault, numPrecision, numRadix *string
		var isPK, isUnique bool
		err = rows.Scan(&colName, &colDefault, &isNullable, &dataType, &maxLenStr, &numPrecision, &numRadix, &isPK, &isUnique)
		if err != nil {
			return nil, nil, err
		}

		// fmt.Println(args, colName, isNullable, dataType, maxLenStr, colDefault, numPrecision, numRadix, isPK, isUnique)
		var maxLen int
		if maxLenStr != nil {
			maxLen, err = strconv.Atoi(*maxLenStr)
			if err != nil {
				return nil, nil, err
			}
		}

		col.Name = strings.Trim(colName, `" `)

		// A primary-key column's default (e.g. nextval) is not recorded as a
		// Default value; only non-PK defaults are kept.
		if colDefault != nil || isPK {
			if isPK {
				col.IsPrimaryKey = true
			} else {
				col.Default = *colDefault
			}
		}

		// nextval(...) defaults indicate a sequence-backed (serial) column.
		if colDefault != nil && strings.HasPrefix(*colDefault, "nextval(") {
			col.IsAutoIncrement = true
		}

		col.Nullable = (isNullable == "YES")

		// Map PostgreSQL type names onto xorm's generic SQL types.
		switch dataType {
		case "character varying", "character":
			col.SQLType = core.SQLType{Name: core.Varchar, DefaultLength: 0, DefaultLength2: 0}
		case "timestamp without time zone":
			col.SQLType = core.SQLType{Name: core.DateTime, DefaultLength: 0, DefaultLength2: 0}
		case "timestamp with time zone":
			col.SQLType = core.SQLType{Name: core.TimeStampz, DefaultLength: 0, DefaultLength2: 0}
		case "double precision":
			col.SQLType = core.SQLType{Name: core.Double, DefaultLength: 0, DefaultLength2: 0}
		case "boolean":
			col.SQLType = core.SQLType{Name: core.Bool, DefaultLength: 0, DefaultLength2: 0}
		case "time without time zone":
			col.SQLType = core.SQLType{Name: core.Time, DefaultLength: 0, DefaultLength2: 0}
		case "oid":
			col.SQLType = core.SQLType{Name: core.BigInt, DefaultLength: 0, DefaultLength2: 0}
		default:
			col.SQLType = core.SQLType{Name: strings.ToUpper(dataType), DefaultLength: 0, DefaultLength2: 0}
		}
		if _, ok := core.SqlTypes[col.SQLType.Name]; !ok {
			return nil, nil, fmt.Errorf("Unknown colType: %v", dataType)
		}

		col.Length = maxLen

		// Text/time defaults must be single-quoted to be valid SQL literals.
		// NOTE(review): DefaultIsEmpty is never set true in this function, so
		// the "''" branch below appears unreachable — confirm against core.Column.
		if col.SQLType.IsText() || col.SQLType.IsTime() {
			if col.Default != "" {
				col.Default = "'" + col.Default + "'"
			} else {
				if col.DefaultIsEmpty {
					col.Default = "''"
				}
			}
		}
		cols[col.Name] = col
		colSeq = append(colSeq, col.Name)
	}

	return colSeq, cols, nil
}
+
+func (db *postgres) GetTables() ([]*core.Table, error) {
+ args := []interface{}{}
+ s := "SELECT tablename FROM pg_tables"
+ if len(db.Schema) != 0 {
+ args = append(args, db.Schema)
+ s = s + " WHERE schemaname = $1"
+ }
+
+ db.LogSQL(s, args)
+
+ rows, err := db.DB().Query(s, args...)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ tables := make([]*core.Table, 0)
+ for rows.Next() {
+ table := core.NewEmptyTable()
+ var name string
+ err = rows.Scan(&name)
+ if err != nil {
+ return nil, err
+ }
+ table.Name = name
+ tables = append(tables, table)
+ }
+ return tables, nil
+}
+
// getIndexColName extracts the column names from a CREATE INDEX statement
// as reported by pg_indexes.indexdef (the comma-separated list inside the
// first pair of parentheses, with per-column modifiers dropped).
func getIndexColName(indexdef string) []string {
	inner := strings.Split(indexdef, "(")[1]
	inner = strings.Split(inner, ")")[0]

	var colNames []string
	for _, part := range strings.Split(inner, ",") {
		part = strings.TrimLeft(part, " ")
		colNames = append(colNames, strings.Split(part, " ")[0])
	}
	return colNames
}
+
+func (db *postgres) GetIndexes(tableName string) (map[string]*core.Index, error) {
+ args := []interface{}{tableName}
+ s := fmt.Sprintf("SELECT indexname, indexdef FROM pg_indexes WHERE tablename=$1")
+ if len(db.Schema) != 0 {
+ args = append(args, db.Schema)
+ s = s + " AND schemaname=$2"
+ }
+ db.LogSQL(s, args)
+
+ rows, err := db.DB().Query(s, args...)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ indexes := make(map[string]*core.Index, 0)
+ for rows.Next() {
+ var indexType int
+ var indexName, indexdef string
+ var colNames []string
+ err = rows.Scan(&indexName, &indexdef)
+ if err != nil {
+ return nil, err
+ }
+ indexName = strings.Trim(indexName, `" `)
+ if strings.HasSuffix(indexName, "_pkey") {
+ continue
+ }
+ if strings.HasPrefix(indexdef, "CREATE UNIQUE INDEX") {
+ indexType = core.UniqueType
+ } else {
+ indexType = core.IndexType
+ }
+ colNames = getIndexColName(indexdef)
+ var isRegular bool
+ if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) {
+ newIdxName := indexName[5+len(tableName):]
+ isRegular = true
+ if newIdxName != "" {
+ indexName = newIdxName
+ }
+ }
+
+ index := &core.Index{Name: indexName, Type: indexType, Cols: make([]string, 0)}
+ for _, colName := range colNames {
+ index.Cols = append(index.Cols, strings.Trim(colName, `" `))
+ }
+ index.IsRegular = isRegular
+ indexes[index.Name] = index
+ }
+ return indexes, nil
+}
+
+func (db *postgres) Filters() []core.Filter {
+ return []core.Filter{&core.IdFilter{}, &core.QuoteFilter{}, &core.SeqFilter{Prefix: "$", Start: 1}}
+}
+
// pqDriver parses DSNs for the lib/pq PostgreSQL driver.
type pqDriver struct {
}
+
// values is a simple string-to-string option map used while parsing
// connection strings.
type values map[string]string

// Set stores v under key k.
func (vs values) Set(k, v string) {
	vs[k] = v
}

// Get returns the value stored under k, or "" when the key is absent.
func (vs values) Get(k string) (v string) {
	return vs[k]
}
+
// parseURL extracts the database name from a postgres:// or postgresql://
// connection URL. It returns "" (with no error) when the URL has no path.
func parseURL(connstr string) (string, error) {
	u, err := url.Parse(connstr)
	if err != nil {
		return "", err
	}

	switch u.Scheme {
	case "postgres", "postgresql":
		// accepted schemes
	default:
		return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme)
	}

	if u.Path == "" {
		return "", nil
	}

	// Escape characters that are significant in key=value DSN form.
	escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`)
	return escaper.Replace(u.Path[1:]), nil
}
+
+func parseOpts(name string, o values) error {
+ if len(name) == 0 {
+ return fmt.Errorf("invalid options: %s", name)
+ }
+
+ name = strings.TrimSpace(name)
+
+ ps := strings.Split(name, " ")
+ for _, p := range ps {
+ kv := strings.Split(p, "=")
+ if len(kv) < 2 {
+ return fmt.Errorf("invalid option: %q", p)
+ }
+ o.Set(kv[0], kv[1])
+ }
+
+ return nil
+}
+
+func (p *pqDriver) Parse(driverName, dataSourceName string) (*core.Uri, error) {
+ db := &core.Uri{DbType: core.POSTGRES}
+ var err error
+
+ if strings.HasPrefix(dataSourceName, "postgresql://") || strings.HasPrefix(dataSourceName, "postgres://") {
+ db.DbName, err = parseURL(dataSourceName)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ o := make(values)
+ err = parseOpts(dataSourceName, o)
+ if err != nil {
+ return nil, err
+ }
+
+ db.DbName = o.Get("dbname")
+ }
+
+ if db.DbName == "" {
+ return nil, errors.New("dbname is empty")
+ }
+
+ return db, nil
+}
+
// pqDriverPgx parses DSNs for the pgx PostgreSQL driver by reusing the
// lib/pq parsing after stripping pgx's driver-specific prefix.
type pqDriverPgx struct {
	pqDriver
}

// Parse strips the binary prefix the pgx stdlib wrapper may prepend to the
// DSN (detected by a leading NUL byte) before delegating to pqDriver.Parse.
// NOTE(review): the 9-byte prefix length is assumed from the upstream
// driver's framing — confirm against the pgx version in use.
func (pgx *pqDriverPgx) Parse(driverName, dataSourceName string) (*core.Uri, error) {
	// Remove the leading characters for driver to work
	if len(dataSourceName) >= 9 && dataSourceName[0] == 0 {
		dataSourceName = dataSourceName[9:]
	}
	return pgx.pqDriver.Parse(driverName, dataSourceName)
}
diff --git a/vendor/github.com/go-xorm/xorm/dialect_sqlite3.go b/vendor/github.com/go-xorm/xorm/dialect_sqlite3.go
new file mode 100644
index 0000000..60f0729
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/dialect_sqlite3.go
@@ -0,0 +1,456 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "database/sql"
+ "errors"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "xorm.io/core"
+)
+
+var (
+ sqlite3ReservedWords = map[string]bool{
+ "ABORT": true,
+ "ACTION": true,
+ "ADD": true,
+ "AFTER": true,
+ "ALL": true,
+ "ALTER": true,
+ "ANALYZE": true,
+ "AND": true,
+ "AS": true,
+ "ASC": true,
+ "ATTACH": true,
+ "AUTOINCREMENT": true,
+ "BEFORE": true,
+ "BEGIN": true,
+ "BETWEEN": true,
+ "BY": true,
+ "CASCADE": true,
+ "CASE": true,
+ "CAST": true,
+ "CHECK": true,
+ "COLLATE": true,
+ "COLUMN": true,
+ "COMMIT": true,
+ "CONFLICT": true,
+ "CONSTRAINT": true,
+ "CREATE": true,
+ "CROSS": true,
+ "CURRENT_DATE": true,
+ "CURRENT_TIME": true,
+ "CURRENT_TIMESTAMP": true,
+ "DATABASE": true,
+ "DEFAULT": true,
+ "DEFERRABLE": true,
+ "DEFERRED": true,
+ "DELETE": true,
+ "DESC": true,
+ "DETACH": true,
+ "DISTINCT": true,
+ "DROP": true,
+ "EACH": true,
+ "ELSE": true,
+ "END": true,
+ "ESCAPE": true,
+ "EXCEPT": true,
+ "EXCLUSIVE": true,
+ "EXISTS": true,
+ "EXPLAIN": true,
+ "FAIL": true,
+ "FOR": true,
+ "FOREIGN": true,
+ "FROM": true,
+ "FULL": true,
+ "GLOB": true,
+ "GROUP": true,
+ "HAVING": true,
+ "IF": true,
+ "IGNORE": true,
+ "IMMEDIATE": true,
+ "IN": true,
+ "INDEX": true,
+ "INDEXED": true,
+ "INITIALLY": true,
+ "INNER": true,
+ "INSERT": true,
+ "INSTEAD": true,
+ "INTERSECT": true,
+ "INTO": true,
+ "IS": true,
+ "ISNULL": true,
+ "JOIN": true,
+ "KEY": true,
+ "LEFT": true,
+ "LIKE": true,
+ "LIMIT": true,
+ "MATCH": true,
+ "NATURAL": true,
+ "NO": true,
+ "NOT": true,
+ "NOTNULL": true,
+ "NULL": true,
+ "OF": true,
+ "OFFSET": true,
+ "ON": true,
+ "OR": true,
+ "ORDER": true,
+ "OUTER": true,
+ "PLAN": true,
+ "PRAGMA": true,
+ "PRIMARY": true,
+ "QUERY": true,
+ "RAISE": true,
+ "RECURSIVE": true,
+ "REFERENCES": true,
+ "REGEXP": true,
+ "REINDEX": true,
+ "RELEASE": true,
+ "RENAME": true,
+ "REPLACE": true,
+ "RESTRICT": true,
+ "RIGHT": true,
+ "ROLLBACK": true,
+ "ROW": true,
+ "SAVEPOINT": true,
+ "SELECT": true,
+ "SET": true,
+ "TABLE": true,
+ "TEMP": true,
+ "TEMPORARY": true,
+ "THEN": true,
+ "TO": true,
+ "TRANSACTI": true,
+ "TRIGGER": true,
+ "UNION": true,
+ "UNIQUE": true,
+ "UPDATE": true,
+ "USING": true,
+ "VACUUM": true,
+ "VALUES": true,
+ "VIEW": true,
+ "VIRTUAL": true,
+ "WHEN": true,
+ "WHERE": true,
+ "WITH": true,
+ "WITHOUT": true,
+ }
+)
+
+type sqlite3 struct {
+ core.Base
+}
+
+func (db *sqlite3) Init(d *core.DB, uri *core.Uri, drivername, dataSourceName string) error {
+ return db.Base.Init(d, db, uri, drivername, dataSourceName)
+}
+
+func (db *sqlite3) SqlType(c *core.Column) string {
+ switch t := c.SQLType.Name; t {
+ case core.Bool:
+ if c.Default == "true" {
+ c.Default = "1"
+ } else if c.Default == "false" {
+ c.Default = "0"
+ }
+ return core.Integer
+ case core.Date, core.DateTime, core.TimeStamp, core.Time:
+ return core.DateTime
+ case core.TimeStampz:
+ return core.Text
+ case core.Char, core.Varchar, core.NVarchar, core.TinyText,
+ core.Text, core.MediumText, core.LongText, core.Json:
+ return core.Text
+ case core.Bit, core.TinyInt, core.SmallInt, core.MediumInt, core.Int, core.Integer, core.BigInt:
+ return core.Integer
+ case core.Float, core.Double, core.Real:
+ return core.Real
+ case core.Decimal, core.Numeric:
+ return core.Numeric
+ case core.TinyBlob, core.Blob, core.MediumBlob, core.LongBlob, core.Bytea, core.Binary, core.VarBinary:
+ return core.Blob
+ case core.Serial, core.BigSerial:
+ c.IsPrimaryKey = true
+ c.IsAutoIncrement = true
+ c.Nullable = false
+ return core.Integer
+ default:
+ return t
+ }
+}
+
+func (db *sqlite3) FormatBytes(bs []byte) string {
+ return fmt.Sprintf("X'%x'", bs)
+}
+
+func (db *sqlite3) SupportInsertMany() bool {
+ return true
+}
+
+func (db *sqlite3) IsReserved(name string) bool {
+ _, ok := sqlite3ReservedWords[name]
+ return ok
+}
+
+func (db *sqlite3) Quote(name string) string {
+ return "`" + name + "`"
+}
+
+func (db *sqlite3) AutoIncrStr() string {
+ return "AUTOINCREMENT"
+}
+
+func (db *sqlite3) SupportEngine() bool {
+ return false
+}
+
+func (db *sqlite3) SupportCharset() bool {
+ return false
+}
+
+func (db *sqlite3) IndexOnTable() bool {
+ return false
+}
+
+func (db *sqlite3) IndexCheckSql(tableName, idxName string) (string, []interface{}) {
+ args := []interface{}{idxName}
+ return "SELECT name FROM sqlite_master WHERE type='index' and name = ?", args
+}
+
+func (db *sqlite3) TableCheckSql(tableName string) (string, []interface{}) {
+ args := []interface{}{tableName}
+ return "SELECT name FROM sqlite_master WHERE type='table' and name = ?", args
+}
+
+func (db *sqlite3) DropIndexSql(tableName string, index *core.Index) string {
+ // var unique string
+ quote := db.Quote
+ idxName := index.Name
+
+ if !strings.HasPrefix(idxName, "UQE_") &&
+ !strings.HasPrefix(idxName, "IDX_") {
+ if index.Type == core.UniqueType {
+ idxName = fmt.Sprintf("UQE_%v_%v", tableName, index.Name)
+ } else {
+ idxName = fmt.Sprintf("IDX_%v_%v", tableName, index.Name)
+ }
+ }
+ return fmt.Sprintf("DROP INDEX %v", quote(idxName))
+}
+
+func (db *sqlite3) ForUpdateSql(query string) string {
+ return query
+}
+
+/*func (db *sqlite3) ColumnCheckSql(tableName, colName string) (string, []interface{}) {
+ args := []interface{}{tableName}
+ sql := "SELECT name FROM sqlite_master WHERE type='table' and name = ? and ((sql like '%`" + colName + "`%') or (sql like '%[" + colName + "]%'))"
+ return sql, args
+}*/
+
+func (db *sqlite3) IsColumnExist(tableName, colName string) (bool, error) {
+ args := []interface{}{tableName}
+ query := "SELECT name FROM sqlite_master WHERE type='table' and name = ? and ((sql like '%`" + colName + "`%') or (sql like '%[" + colName + "]%'))"
+ db.LogSQL(query, args)
+ rows, err := db.DB().Query(query, args...)
+ if err != nil {
+ return false, err
+ }
+ defer rows.Close()
+
+ if rows.Next() {
+ return true, nil
+ }
+ return false, nil
+}
+
+func (db *sqlite3) GetColumns(tableName string) ([]string, map[string]*core.Column, error) {
+ args := []interface{}{tableName}
+ s := "SELECT sql FROM sqlite_master WHERE type='table' and name = ?"
+ db.LogSQL(s, args)
+ rows, err := db.DB().Query(s, args...)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer rows.Close()
+
+ var name string
+ for rows.Next() {
+ err = rows.Scan(&name)
+ if err != nil {
+ return nil, nil, err
+ }
+ break
+ }
+
+ if name == "" {
+ return nil, nil, errors.New("no table named " + tableName)
+ }
+
+ nStart := strings.Index(name, "(")
+ nEnd := strings.LastIndex(name, ")")
+ reg := regexp.MustCompile(`[^\(,\)]*(\([^\(]*\))?`)
+ colCreates := reg.FindAllString(name[nStart+1:nEnd], -1)
+ cols := make(map[string]*core.Column)
+ colSeq := make([]string, 0)
+ for _, colStr := range colCreates {
+ reg = regexp.MustCompile(`,\s`)
+ colStr = reg.ReplaceAllString(colStr, ",")
+ if strings.HasPrefix(strings.TrimSpace(colStr), "PRIMARY KEY") {
+ parts := strings.Split(strings.TrimSpace(colStr), "(")
+ if len(parts) == 2 {
+ pkCols := strings.Split(strings.TrimRight(strings.TrimSpace(parts[1]), ")"), ",")
+ for _, pk := range pkCols {
+ if col, ok := cols[strings.Trim(strings.TrimSpace(pk), "`")]; ok {
+ col.IsPrimaryKey = true
+ }
+ }
+ }
+ continue
+ }
+
+ fields := strings.Fields(strings.TrimSpace(colStr))
+ col := new(core.Column)
+ col.Indexes = make(map[string]int)
+ col.Nullable = true
+ col.DefaultIsEmpty = true
+
+ for idx, field := range fields {
+ if idx == 0 {
+ col.Name = strings.Trim(strings.Trim(field, "`[] "), `"`)
+ continue
+ } else if idx == 1 {
+ col.SQLType = core.SQLType{Name: field, DefaultLength: 0, DefaultLength2: 0}
+ }
+ switch field {
+ case "PRIMARY":
+ col.IsPrimaryKey = true
+ case "AUTOINCREMENT":
+ col.IsAutoIncrement = true
+ case "NULL":
+ if fields[idx-1] == "NOT" {
+ col.Nullable = false
+ } else {
+ col.Nullable = true
+ }
+ case "DEFAULT":
+ col.Default = fields[idx+1]
+ col.DefaultIsEmpty = false
+ }
+ }
+ if !col.SQLType.IsNumeric() && !col.DefaultIsEmpty {
+ col.Default = "'" + col.Default + "'"
+ }
+ cols[col.Name] = col
+ colSeq = append(colSeq, col.Name)
+ }
+ return colSeq, cols, nil
+}
+
+func (db *sqlite3) GetTables() ([]*core.Table, error) {
+ args := []interface{}{}
+ s := "SELECT name FROM sqlite_master WHERE type='table'"
+ db.LogSQL(s, args)
+
+ rows, err := db.DB().Query(s, args...)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ tables := make([]*core.Table, 0)
+ for rows.Next() {
+ table := core.NewEmptyTable()
+ err = rows.Scan(&table.Name)
+ if err != nil {
+ return nil, err
+ }
+ if table.Name == "sqlite_sequence" {
+ continue
+ }
+ tables = append(tables, table)
+ }
+ return tables, nil
+}
+
+func (db *sqlite3) GetIndexes(tableName string) (map[string]*core.Index, error) {
+ args := []interface{}{tableName}
+ s := "SELECT sql FROM sqlite_master WHERE type='index' and tbl_name = ?"
+ db.LogSQL(s, args)
+
+ rows, err := db.DB().Query(s, args...)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ indexes := make(map[string]*core.Index, 0)
+ for rows.Next() {
+ var tmpSQL sql.NullString
+ err = rows.Scan(&tmpSQL)
+ if err != nil {
+ return nil, err
+ }
+
+ if !tmpSQL.Valid {
+ continue
+ }
+ sql := tmpSQL.String
+
+ index := new(core.Index)
+ nNStart := strings.Index(sql, "INDEX")
+ nNEnd := strings.Index(sql, "ON")
+ if nNStart == -1 || nNEnd == -1 {
+ continue
+ }
+
+ indexName := strings.Trim(sql[nNStart+6:nNEnd], "` []")
+ var isRegular bool
+ if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) {
+ index.Name = indexName[5+len(tableName):]
+ isRegular = true
+ } else {
+ index.Name = indexName
+ }
+
+ if strings.HasPrefix(sql, "CREATE UNIQUE INDEX") {
+ index.Type = core.UniqueType
+ } else {
+ index.Type = core.IndexType
+ }
+
+ nStart := strings.Index(sql, "(")
+ nEnd := strings.Index(sql, ")")
+ colIndexes := strings.Split(sql[nStart+1:nEnd], ",")
+
+ index.Cols = make([]string, 0)
+ for _, col := range colIndexes {
+ index.Cols = append(index.Cols, strings.Trim(col, "` []"))
+ }
+ index.IsRegular = isRegular
+ indexes[index.Name] = index
+ }
+
+ return indexes, nil
+}
+
+func (db *sqlite3) Filters() []core.Filter {
+ return []core.Filter{&core.IdFilter{}}
+}
+
+type sqlite3Driver struct {
+}
+
+func (p *sqlite3Driver) Parse(driverName, dataSourceName string) (*core.Uri, error) {
+ if strings.Contains(dataSourceName, "?") {
+ dataSourceName = dataSourceName[:strings.Index(dataSourceName, "?")]
+ }
+
+ return &core.Uri{DbType: core.SQLITE, DbName: dataSourceName}, nil
+}
diff --git a/vendor/github.com/go-xorm/xorm/doc.go b/vendor/github.com/go-xorm/xorm/doc.go
new file mode 100644
index 0000000..a687e69
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/doc.go
@@ -0,0 +1,184 @@
+// Copyright 2013 - 2016 The XORM Authors. All rights reserved.
+// Use of this source code is governed by a BSD
+// license that can be found in the LICENSE file.
+
+/*
+
+Package xorm is a simple and powerful ORM for Go.
+
+Installation
+
+Make sure you have installed Go 1.6+ and then:
+
+ go get github.com/go-xorm/xorm
+
+Create Engine
+
+Firstly, we should new an engine for a database
+
+ engine, err := xorm.NewEngine(driverName, dataSourceName)
+
+Method NewEngine's parameters are the same as sql.Open's. It depends on the
+drivers' implementation.
+Generally, one engine per application is enough. You can set it as a package variable.
+
+Raw Methods
+
+XORM also support raw SQL execution:
+
+1. query a SQL string, the returned results is []map[string][]byte
+
+ results, err := engine.Query("select * from user")
+
+2. execute a SQL string, the returned results
+
+ affected, err := engine.Exec("update user set .... where ...")
+
+ORM Methods
+
+There are 8 major ORM methods and many helpful methods to use to operate database.
+
+1. Insert one or multiple records to database
+
+ affected, err := engine.Insert(&struct)
+ // INSERT INTO struct () values ()
+ affected, err := engine.Insert(&struct1, &struct2)
+ // INSERT INTO struct1 () values ()
+ // INSERT INTO struct2 () values ()
+ affected, err := engine.Insert(&sliceOfStruct)
+ // INSERT INTO struct () values (),(),()
+ affected, err := engine.Insert(&struct1, &sliceOfStruct2)
+ // INSERT INTO struct1 () values ()
+ // INSERT INTO struct2 () values (),(),()
+
+2. Query one record or one variable from database
+
+ has, err := engine.Get(&user)
+ // SELECT * FROM user LIMIT 1
+
+ var id int64
+ has, err := engine.Table("user").Where("name = ?", name).Get(&id)
+ // SELECT id FROM user WHERE name = ? LIMIT 1
+
+3. Query multiple records from database
+
+ var sliceOfStructs []Struct
+ err := engine.Find(&sliceOfStructs)
+ // SELECT * FROM user
+
+ var mapOfStructs = make(map[int64]Struct)
+ err := engine.Find(&mapOfStructs)
+ // SELECT * FROM user
+
+ var int64s []int64
+ err := engine.Table("user").Cols("id").Find(&int64s)
+ // SELECT id FROM user
+
+4. Query multiple records and handle them record by record; there are two methods,
+one is Iterate, another is Rows
+
+ err := engine.Iterate(...)
+ // SELECT * FROM user
+
+ rows, err := engine.Rows(...)
+ // SELECT * FROM user
+ defer rows.Close()
+ bean := new(Struct)
+ for rows.Next() {
+ err = rows.Scan(bean)
+ }
+
+5. Update one or more records
+
+ affected, err := engine.ID(...).Update(&user)
+ // UPDATE user SET ...
+
+6. Delete one or more records; Delete MUST have a condition
+
+ affected, err := engine.Where(...).Delete(&user)
+ // DELETE FROM user Where ...
+
+7. Count records
+
+ counts, err := engine.Count(&user)
+ // SELECT count(*) AS total FROM user
+
+ counts, err := engine.SQL("select count(*) FROM user").Count()
+ // select count(*) FROM user
+
+8. Sum records
+
+ sumFloat64, err := engine.Sum(&user, "id")
+ // SELECT sum(id) from user
+
+ sumFloat64s, err := engine.Sums(&user, "id1", "id2")
+ // SELECT sum(id1), sum(id2) from user
+
+ sumInt64s, err := engine.SumsInt(&user, "id1", "id2")
+ // SELECT sum(id1), sum(id2) from user
+
+Conditions
+
+The above 8 methods can be combined with chainable condition methods.
+Attention: the above 8 methods must be the last method in the chain.
+
+1. ID, In
+
+ engine.ID(1).Get(&user) // for single primary key
+ // SELECT * FROM user WHERE id = 1
+ engine.ID(core.PK{1, 2}).Get(&user) // for composite primary keys
+ // SELECT * FROM user WHERE id1 = 1 AND id2 = 2
+ engine.In("id", 1, 2, 3).Find(&users)
+ // SELECT * FROM user WHERE id IN (1, 2, 3)
+ engine.In("id", []int{1, 2, 3}).Find(&users)
+ // SELECT * FROM user WHERE id IN (1, 2, 3)
+
+2. Where, And, Or
+
+ engine.Where().And().Or().Find()
+ // SELECT * FROM user WHERE (.. AND ..) OR ...
+
+3. OrderBy, Asc, Desc
+
+ engine.Asc().Desc().Find()
+ // SELECT * FROM user ORDER BY .. ASC, .. DESC
+ engine.OrderBy().Find()
+ // SELECT * FROM user ORDER BY ..
+
+4. Limit, Top
+
+ engine.Limit().Find()
+ // SELECT * FROM user LIMIT .. OFFSET ..
+ engine.Top(5).Find()
+ // SELECT TOP 5 * FROM user // for mssql
+ // SELECT * FROM user LIMIT .. OFFSET 0 //for other databases
+
+5. SQL, lets you customize the SQL
+
+ var users []User
+ engine.SQL("select * from user").Find(&users)
+
+6. Cols, Omit, Distinct
+
+ var users []*User
+ engine.Cols("col1, col2").Find(&users)
+ // SELECT col1, col2 FROM user
+ engine.Cols("col1", "col2").Where().Update(user)
+ // UPDATE user set col1 = ?, col2 = ? Where ...
+ engine.Omit("col1").Find(&users)
+ // SELECT col2, col3 FROM user
+ engine.Omit("col1").Insert(&user)
+ // INSERT INTO table (non-col1) VALUES ()
+ engine.Distinct("col1").Find(&users)
+ // SELECT DISTINCT col1 FROM user
+
+7. Join, GroupBy, Having
+
+ engine.GroupBy("name").Having("name='xlw'").Find(&users)
+ //SELECT * FROM user GROUP BY name HAVING name='xlw'
+ engine.Join("LEFT", "userdetail", "user.id=userdetail.id").Find(&users)
+ //SELECT * FROM user LEFT JOIN userdetail ON user.id=userdetail.id
+
+More usage, please visit http://xorm.io/docs
+*/
+package xorm
diff --git a/vendor/github.com/go-xorm/xorm/engine.go b/vendor/github.com/go-xorm/xorm/engine.go
new file mode 100644
index 0000000..ebcab91
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/engine.go
@@ -0,0 +1,1645 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "database/sql"
+ "encoding/gob"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "xorm.io/builder"
+ "xorm.io/core"
+)
+
+// Engine is the major struct of xorm, it means a database manager.
+// Commonly, an application only need one engine
+type Engine struct {
+ db *core.DB
+ dialect core.Dialect
+
+ ColumnMapper core.IMapper
+ TableMapper core.IMapper
+ TagIdentifier string
+ Tables map[reflect.Type]*core.Table
+
+ mutex *sync.RWMutex
+ Cacher core.Cacher
+
+ showSQL bool
+ showExecTime bool
+
+ logger core.ILogger
+ TZLocation *time.Location // The timezone of the application
+ DatabaseTZ *time.Location // The timezone of the database
+
+ disableGlobalCache bool
+
+ tagHandlers map[string]tagHandler
+
+ engineGroup *EngineGroup
+
+ cachers map[string]core.Cacher
+ cacherLock sync.RWMutex
+
+ defaultContext context.Context
+}
+
+func (engine *Engine) setCacher(tableName string, cacher core.Cacher) {
+ engine.cacherLock.Lock()
+ engine.cachers[tableName] = cacher
+ engine.cacherLock.Unlock()
+}
+
+func (engine *Engine) SetCacher(tableName string, cacher core.Cacher) {
+ engine.setCacher(tableName, cacher)
+}
+
+func (engine *Engine) getCacher(tableName string) core.Cacher {
+ var cacher core.Cacher
+ var ok bool
+ engine.cacherLock.RLock()
+ cacher, ok = engine.cachers[tableName]
+ engine.cacherLock.RUnlock()
+ if !ok && !engine.disableGlobalCache {
+ cacher = engine.Cacher
+ }
+ return cacher
+}
+
+func (engine *Engine) GetCacher(tableName string) core.Cacher {
+ return engine.getCacher(tableName)
+}
+
+// BufferSize sets buffer size for iterate
+func (engine *Engine) BufferSize(size int) *Session {
+ session := engine.NewSession()
+ session.isAutoClose = true
+ return session.BufferSize(size)
+}
+
+// CondDeleted returns the conditions whether a record is soft deleted.
+func (engine *Engine) CondDeleted(colName string) builder.Cond {
+ if engine.dialect.DBType() == core.MSSQL {
+ return builder.IsNull{colName}
+ }
+ return builder.IsNull{colName}.Or(builder.Eq{colName: zeroTime1})
+}
+
+// ShowSQL show SQL statement or not on logger if log level is great than INFO
+func (engine *Engine) ShowSQL(show ...bool) {
+ engine.logger.ShowSQL(show...)
+ if len(show) == 0 {
+ engine.showSQL = true
+ } else {
+ engine.showSQL = show[0]
+ }
+}
+
+// ShowExecTime show SQL statement and execute time or not on logger if log level is great than INFO
+func (engine *Engine) ShowExecTime(show ...bool) {
+ if len(show) == 0 {
+ engine.showExecTime = true
+ } else {
+ engine.showExecTime = show[0]
+ }
+}
+
+// Logger return the logger interface
+func (engine *Engine) Logger() core.ILogger {
+ return engine.logger
+}
+
+// SetLogger set the new logger
+func (engine *Engine) SetLogger(logger core.ILogger) {
+ engine.logger = logger
+ engine.showSQL = logger.IsShowSQL()
+ engine.dialect.SetLogger(logger)
+}
+
+// SetLogLevel sets the logger level
+func (engine *Engine) SetLogLevel(level core.LogLevel) {
+ engine.logger.SetLevel(level)
+}
+
+// SetDisableGlobalCache disable global cache or not
+func (engine *Engine) SetDisableGlobalCache(disable bool) {
+ if engine.disableGlobalCache != disable {
+ engine.disableGlobalCache = disable
+ }
+}
+
+// DriverName return the current sql driver's name
+func (engine *Engine) DriverName() string {
+ return engine.dialect.DriverName()
+}
+
+// DataSourceName return the current connection string
+func (engine *Engine) DataSourceName() string {
+ return engine.dialect.DataSourceName()
+}
+
+// SetMapper set the name mapping rules
+func (engine *Engine) SetMapper(mapper core.IMapper) {
+ engine.SetTableMapper(mapper)
+ engine.SetColumnMapper(mapper)
+}
+
+// SetTableMapper set the table name mapping rule
+func (engine *Engine) SetTableMapper(mapper core.IMapper) {
+ engine.TableMapper = mapper
+}
+
+// SetColumnMapper set the column name mapping rule
+func (engine *Engine) SetColumnMapper(mapper core.IMapper) {
+ engine.ColumnMapper = mapper
+}
+
+// SupportInsertMany returns true if the engine's database supports batch
+// inserting records like "insert into user values (name, age), (name, age)".
+// When the return value is true, engine.Insert(&users) will
+// generate a batch SQL statement and execute it.
+func (engine *Engine) SupportInsertMany() bool {
+ return engine.dialect.SupportInsertMany()
+}
+
+func (engine *Engine) quoteColumns(columnStr string) string {
+ columns := strings.Split(columnStr, ",")
+ for i := 0; i < len(columns); i++ {
+ columns[i] = engine.Quote(strings.TrimSpace(columns[i]))
+ }
+ return strings.Join(columns, ",")
+}
+
+// Quote Use QuoteStr quote the string sql
+func (engine *Engine) Quote(value string) string {
+ value = strings.TrimSpace(value)
+ if len(value) == 0 {
+ return value
+ }
+
+ buf := builder.StringBuilder{}
+ engine.QuoteTo(&buf, value)
+
+ return buf.String()
+}
+
+// QuoteTo quotes string and writes into the buffer
+func (engine *Engine) QuoteTo(buf *builder.StringBuilder, value string) {
+ if buf == nil {
+ return
+ }
+
+ value = strings.TrimSpace(value)
+ if value == "" {
+ return
+ }
+
+ quotePair := engine.dialect.Quote("")
+
+ if value[0] == '`' || len(quotePair) < 2 || value[0] == quotePair[0] { // no quote
+ _, _ = buf.WriteString(value)
+ return
+ } else {
+ prefix, suffix := quotePair[0], quotePair[1]
+
+ _ = buf.WriteByte(prefix)
+ for i := 0; i < len(value); i++ {
+ if value[i] == '.' {
+ _ = buf.WriteByte(suffix)
+ _ = buf.WriteByte('.')
+ _ = buf.WriteByte(prefix)
+ } else {
+ _ = buf.WriteByte(value[i])
+ }
+ }
+ _ = buf.WriteByte(suffix)
+ }
+}
+
+func (engine *Engine) quote(sql string) string {
+ return engine.dialect.Quote(sql)
+}
+
+// SqlType will be deprecated, please use SQLType instead
+//
+// Deprecated: use SQLType instead
+func (engine *Engine) SqlType(c *core.Column) string {
+ return engine.SQLType(c)
+}
+
+// SQLType A simple wrapper to dialect's core.SqlType method
+func (engine *Engine) SQLType(c *core.Column) string {
+ return engine.dialect.SqlType(c)
+}
+
+// AutoIncrStr Database's autoincrement statement
+func (engine *Engine) AutoIncrStr() string {
+ return engine.dialect.AutoIncrStr()
+}
+
+// SetConnMaxLifetime sets the maximum amount of time a connection may be reused.
+func (engine *Engine) SetConnMaxLifetime(d time.Duration) {
+ engine.db.SetConnMaxLifetime(d)
+}
+
+// SetMaxOpenConns is only available for go 1.2+
+func (engine *Engine) SetMaxOpenConns(conns int) {
+ engine.db.SetMaxOpenConns(conns)
+}
+
+// SetMaxIdleConns set the max idle connections on pool, default is 2
+func (engine *Engine) SetMaxIdleConns(conns int) {
+ engine.db.SetMaxIdleConns(conns)
+}
+
+// SetDefaultCacher set the default cacher. Xorm's default not enable cacher.
+func (engine *Engine) SetDefaultCacher(cacher core.Cacher) {
+ engine.Cacher = cacher
+}
+
+// GetDefaultCacher returns the default cacher
+func (engine *Engine) GetDefaultCacher() core.Cacher {
+ return engine.Cacher
+}
+
+// NoCache If you have set a default cacher and you want to temporarily stop using the cache,
+// you can use NoCache()
+func (engine *Engine) NoCache() *Session {
+ session := engine.NewSession()
+ session.isAutoClose = true
+ return session.NoCache()
+}
+
+// NoCascade If you do not want to auto cascade load object
+func (engine *Engine) NoCascade() *Session {
+ session := engine.NewSession()
+ session.isAutoClose = true
+ return session.NoCascade()
+}
+
+// MapCacher Set a table use a special cacher
+func (engine *Engine) MapCacher(bean interface{}, cacher core.Cacher) error {
+ engine.setCacher(engine.TableName(bean, true), cacher)
+ return nil
+}
+
+// NewDB provides an interface to operate database directly
+func (engine *Engine) NewDB() (*core.DB, error) {
+ return core.OpenDialect(engine.dialect)
+}
+
+// DB return the wrapper of sql.DB
+func (engine *Engine) DB() *core.DB {
+ return engine.db
+}
+
+// Dialect return database dialect
+func (engine *Engine) Dialect() core.Dialect {
+ return engine.dialect
+}
+
+// NewSession New a session
+func (engine *Engine) NewSession() *Session {
+ session := &Session{engine: engine}
+ session.Init()
+ return session
+}
+
+// Close the engine
+func (engine *Engine) Close() error {
+ return engine.db.Close()
+}
+
+// Ping tests if database is alive
+func (engine *Engine) Ping() error {
+ session := engine.NewSession()
+ defer session.Close()
+ return session.Ping()
+}
+
+// logging sql
+func (engine *Engine) logSQL(sqlStr string, sqlArgs ...interface{}) {
+ if engine.showSQL && !engine.showExecTime {
+ if len(sqlArgs) > 0 {
+ engine.logger.Infof("[SQL] %v %#v", sqlStr, sqlArgs)
+ } else {
+ engine.logger.Infof("[SQL] %v", sqlStr)
+ }
+ }
+}
+
+// Sql provides raw sql input parameter. When you have a complex SQL statement
+// and cannot use Where, Id, In and etc. Methods to describe, you can use SQL.
+//
+// Deprecated: use SQL instead.
+func (engine *Engine) Sql(querystring string, args ...interface{}) *Session {
+ return engine.SQL(querystring, args...)
+}
+
+// SQL method lets you manually write raw SQL and operate
+// For example:
+//
+// engine.SQL("select * from user").Find(&users)
+//
+// This code will execute "select * from user" and set the records to users
+func (engine *Engine) SQL(query interface{}, args ...interface{}) *Session {
+ session := engine.NewSession()
+ session.isAutoClose = true
+ return session.SQL(query, args...)
+}
+
+// NoAutoTime By default, if your struct has a "created" or "updated" field tag, the fields
+// will automatically be filled with the current time when Insert or Update
+// is invoked. Call NoAutoTime if you don't want them filled automatically.
+func (engine *Engine) NoAutoTime() *Session {
+ session := engine.NewSession()
+ session.isAutoClose = true
+ return session.NoAutoTime()
+}
+
+// NoAutoCondition disable auto generate Where condition from bean or not
+func (engine *Engine) NoAutoCondition(no ...bool) *Session {
+ session := engine.NewSession()
+ session.isAutoClose = true
+ return session.NoAutoCondition(no...)
+}
+
+// DBMetas Retrieve all tables, columns, indexes' informations from database.
+func (engine *Engine) DBMetas() ([]*core.Table, error) {
+ tables, err := engine.dialect.GetTables()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, table := range tables {
+ colSeq, cols, err := engine.dialect.GetColumns(table.Name)
+ if err != nil {
+ return nil, err
+ }
+ for _, name := range colSeq {
+ table.AddColumn(cols[name])
+ }
+ indexes, err := engine.dialect.GetIndexes(table.Name)
+ if err != nil {
+ return nil, err
+ }
+ table.Indexes = indexes
+
+ for _, index := range indexes {
+ for _, name := range index.Cols {
+ if col := table.GetColumn(name); col != nil {
+ col.Indexes[index.Name] = index.Type
+ } else {
+ return nil, fmt.Errorf("Unknown col %s in index %v of table %v, columns %v", name, index.Name, table.Name, table.ColumnsSeq())
+ }
+ }
+ }
+ }
+ return tables, nil
+}
+
+// DumpAllToFile dump database all table structs and data to a file
+func (engine *Engine) DumpAllToFile(fp string, tp ...core.DbType) error {
+ f, err := os.Create(fp)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ return engine.DumpAll(f, tp...)
+}
+
+// DumpAll dump database all table structs and data to w
+func (engine *Engine) DumpAll(w io.Writer, tp ...core.DbType) error {
+ tables, err := engine.DBMetas()
+ if err != nil {
+ return err
+ }
+ return engine.DumpTables(tables, w, tp...)
+}
+
+// DumpTablesToFile dump specified tables to SQL file.
+func (engine *Engine) DumpTablesToFile(tables []*core.Table, fp string, tp ...core.DbType) error {
+ f, err := os.Create(fp)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ return engine.DumpTables(tables, f, tp...)
+}
+
+// DumpTables dump specify tables to io.Writer
+func (engine *Engine) DumpTables(tables []*core.Table, w io.Writer, tp ...core.DbType) error {
+ return engine.dumpTables(tables, w, tp...)
+}
+
+// dumpTables dump database all table structs and data to w with specify db type
+func (engine *Engine) dumpTables(tables []*core.Table, w io.Writer, tp ...core.DbType) error {
+ var dialect core.Dialect
+ var distDBName string
+ if len(tp) == 0 {
+ dialect = engine.dialect
+ distDBName = string(engine.dialect.DBType())
+ } else {
+ dialect = core.QueryDialect(tp[0])
+ if dialect == nil {
+ return errors.New("Unsupported database type")
+ }
+ dialect.Init(nil, engine.dialect.URI(), "", "")
+ distDBName = string(tp[0])
+ }
+
+ _, err := io.WriteString(w, fmt.Sprintf("/*Generated by xorm v%s %s, from %s to %s*/\n\n",
+ Version, time.Now().In(engine.TZLocation).Format("2006-01-02 15:04:05"), engine.dialect.DBType(), strings.ToUpper(distDBName)))
+ if err != nil {
+ return err
+ }
+
+ for i, table := range tables {
+ if i > 0 {
+ _, err = io.WriteString(w, "\n")
+ if err != nil {
+ return err
+ }
+ }
+ _, err = io.WriteString(w, dialect.CreateTableSql(table, "", table.StoreEngine, "")+";\n")
+ if err != nil {
+ return err
+ }
+ for _, index := range table.Indexes {
+ _, err = io.WriteString(w, dialect.CreateIndexSql(table.Name, index)+";\n")
+ if err != nil {
+ return err
+ }
+ }
+
+ cols := table.ColumnsSeq()
+ colNames := engine.dialect.Quote(strings.Join(cols, engine.dialect.Quote(", ")))
+ destColNames := dialect.Quote(strings.Join(cols, dialect.Quote(", ")))
+
+ rows, err := engine.DB().Query("SELECT " + colNames + " FROM " + engine.Quote(table.Name))
+ if err != nil {
+ return err
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ dest := make([]interface{}, len(cols))
+ err = rows.ScanSlice(&dest)
+ if err != nil {
+ return err
+ }
+
+ _, err = io.WriteString(w, "INSERT INTO "+dialect.Quote(table.Name)+" ("+destColNames+") VALUES (")
+ if err != nil {
+ return err
+ }
+
+ var temp string
+ for i, d := range dest {
+ col := table.GetColumn(cols[i])
+ if col == nil {
+ return errors.New("unknow column error")
+ }
+
+ if d == nil {
+ temp += ", NULL"
+ } else if col.SQLType.IsText() || col.SQLType.IsTime() {
+ var v = fmt.Sprintf("%s", d)
+ if strings.HasSuffix(v, " +0000 UTC") {
+ temp += fmt.Sprintf(", '%s'", v[0:len(v)-len(" +0000 UTC")])
+ } else {
+ temp += ", '" + strings.Replace(v, "'", "''", -1) + "'"
+ }
+ } else if col.SQLType.IsBlob() {
+ if reflect.TypeOf(d).Kind() == reflect.Slice {
+ temp += fmt.Sprintf(", %s", dialect.FormatBytes(d.([]byte)))
+ } else if reflect.TypeOf(d).Kind() == reflect.String {
+ temp += fmt.Sprintf(", '%s'", d.(string))
+ }
+ } else if col.SQLType.IsNumeric() {
+ switch reflect.TypeOf(d).Kind() {
+ case reflect.Slice:
+ if col.SQLType.Name == core.Bool {
+ temp += fmt.Sprintf(", %v", strconv.FormatBool(d.([]byte)[0] != byte('0')))
+ } else {
+ temp += fmt.Sprintf(", %s", string(d.([]byte)))
+ }
+ case reflect.Int16, reflect.Int8, reflect.Int32, reflect.Int64, reflect.Int:
+ if col.SQLType.Name == core.Bool {
+ temp += fmt.Sprintf(", %v", strconv.FormatBool(reflect.ValueOf(d).Int() > 0))
+ } else {
+ temp += fmt.Sprintf(", %v", d)
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ if col.SQLType.Name == core.Bool {
+ temp += fmt.Sprintf(", %v", strconv.FormatBool(reflect.ValueOf(d).Uint() > 0))
+ } else {
+ temp += fmt.Sprintf(", %v", d)
+ }
+ default:
+ temp += fmt.Sprintf(", %v", d)
+ }
+ } else {
+ s := fmt.Sprintf("%v", d)
+ if strings.Contains(s, ":") || strings.Contains(s, "-") {
+ if strings.HasSuffix(s, " +0000 UTC") {
+ temp += fmt.Sprintf(", '%s'", s[0:len(s)-len(" +0000 UTC")])
+ } else {
+ temp += fmt.Sprintf(", '%s'", s)
+ }
+ } else {
+ temp += fmt.Sprintf(", %s", s)
+ }
+ }
+ }
+ _, err = io.WriteString(w, temp[2:]+");\n")
+ if err != nil {
+ return err
+ }
+ }
+
+ // FIXME: Hack for postgres
+ if string(dialect.DBType()) == core.POSTGRES && table.AutoIncrColumn() != nil {
+ _, err = io.WriteString(w, "SELECT setval('"+table.Name+"_id_seq', COALESCE((SELECT MAX("+table.AutoIncrColumn().Name+") + 1 FROM "+dialect.Quote(table.Name)+"), 1), false);\n")
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// Cascade sets whether cascade loading is used for the query.
+func (engine *Engine) Cascade(trueOrFalse ...bool) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Cascade(trueOrFalse...)
+}
+
+// Where provides a condition query
+func (engine *Engine) Where(query interface{}, args ...interface{}) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Where(query, args...)
+}
+
+// Id provides a condition as (id) = ?
+//
+// Deprecated: use ID instead.
+func (engine *Engine) Id(id interface{}) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Id(id)
+}
+
+// ID method provide a condition as (id) = ?
+func (engine *Engine) ID(id interface{}) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.ID(id)
+}
+
+// Before apply before Processor, affected bean is passed to closure arg
+func (engine *Engine) Before(closures func(interface{})) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Before(closures)
+}
+
+// After apply after insert Processor, affected bean is passed to closure arg
+func (engine *Engine) After(closures func(interface{})) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.After(closures)
+}
+
+// Charset set charset when create table, only support mysql now
+func (engine *Engine) Charset(charset string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Charset(charset)
+}
+
+// StoreEngine set store engine when create table, only support mysql now
+func (engine *Engine) StoreEngine(storeEngine string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.StoreEngine(storeEngine)
+}
+
+// Distinct use for distinct columns. Caution: when you are using cache,
+// distinct will not be cached because the cache system needs an id,
+// but distinct will not provide an id
+func (engine *Engine) Distinct(columns ...string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Distinct(columns...)
+}
+
+// Select customize your select columns or contents
+func (engine *Engine) Select(str string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Select(str)
+}
+
+// Cols only use the parameters as select or update columns
+func (engine *Engine) Cols(columns ...string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Cols(columns...)
+}
+
+// AllCols indicates that all columns should be used
+func (engine *Engine) AllCols() *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.AllCols()
+}
+
+// MustCols specify some columns must use even if they are empty
+func (engine *Engine) MustCols(columns ...string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.MustCols(columns...)
+}
+
+// UseBool xorm automatically retrieves conditions according to the struct,
+// but if the struct has bool fields, it will ignore them. So use UseBool
+// to tell the system not to ignore them.
+// If no parameters are given, it will use all the bool fields of the struct;
+// otherwise it will use the given columns
+func (engine *Engine) UseBool(columns ...string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.UseBool(columns...)
+}
+
+// Omit only not use the parameters as select or update columns
+func (engine *Engine) Omit(columns ...string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Omit(columns...)
+}
+
+// Nullable set null when column is zero-value and nullable for update
+func (engine *Engine) Nullable(columns ...string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Nullable(columns...)
+}
+
+// In will generate "column IN (?, ?)"
+func (engine *Engine) In(column string, args ...interface{}) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.In(column, args...)
+}
+
+// NotIn will generate "column NOT IN (?, ?)"
+func (engine *Engine) NotIn(column string, args ...interface{}) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.NotIn(column, args...)
+}
+
+// Incr provides an update string like "column = column + ?"
+func (engine *Engine) Incr(column string, arg ...interface{}) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Incr(column, arg...)
+}
+
+// Decr provides an update string like "column = column - ?"
+func (engine *Engine) Decr(column string, arg ...interface{}) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Decr(column, arg...)
+}
+
+// SetExpr provides an update string like "column = {expression}"
+func (engine *Engine) SetExpr(column string, expression string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.SetExpr(column, expression)
+}
+
+// Table temporarily change the Get, Find, Update's table
+func (engine *Engine) Table(tableNameOrBean interface{}) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Table(tableNameOrBean)
+}
+
+// Alias set the table alias
+func (engine *Engine) Alias(alias string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Alias(alias)
+}
+
+// Limit will generate "LIMIT start, limit"
+func (engine *Engine) Limit(limit int, start ...int) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Limit(limit, start...)
+}
+
+// Desc will generate "ORDER BY column1 DESC, column2 DESC"
+func (engine *Engine) Desc(colNames ...string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Desc(colNames...)
+}
+
+// Asc will generate "ORDER BY column1,column2 Asc"
+// This method can be used in a chain.
+//
+//        engine.Desc("name").Asc("age").Find(&users)
+//        // SELECT * FROM user ORDER BY name DESC, age ASC
+//
+func (engine *Engine) Asc(colNames ...string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Asc(colNames...)
+}
+
+// OrderBy will generate "ORDER BY order"
+func (engine *Engine) OrderBy(order string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.OrderBy(order)
+}
+
+// Prepare enables prepare statement
+func (engine *Engine) Prepare() *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Prepare()
+}
+
+// Join the join_operator should be one of INNER, LEFT OUTER, CROSS etc - this will be prepended to JOIN
+func (engine *Engine) Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Join(joinOperator, tablename, condition, args...)
+}
+
+// GroupBy generate group by statement
+func (engine *Engine) GroupBy(keys string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.GroupBy(keys)
+}
+
+// Having generate having statement
+func (engine *Engine) Having(conditions string) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Having(conditions)
+}
+
+// UnMapType removes the cached database mapping of a type.
+func (engine *Engine) UnMapType(t reflect.Type) {
+	engine.mutex.Lock()
+	defer engine.mutex.Unlock()
+	delete(engine.Tables, t)
+}
+
+// autoMapType returns the cached core.Table for v's type, building and
+// caching the mapping on first use. When a cacher is configured the type
+// is also registered with gob so cached beans can be (de)serialized.
+func (engine *Engine) autoMapType(v reflect.Value) (*core.Table, error) {
+	t := v.Type()
+	engine.mutex.Lock()
+	defer engine.mutex.Unlock()
+	table, ok := engine.Tables[t]
+	if !ok {
+		var err error
+		table, err = engine.mapType(v)
+		if err != nil {
+			return nil, err
+		}
+
+		engine.Tables[t] = table
+		if engine.Cacher != nil {
+			// Register a pointer when possible so gob sees the same
+			// concrete type that will be stored in the cache.
+			if v.CanAddr() {
+				engine.GobRegister(v.Addr().Interface())
+			} else {
+				engine.GobRegister(v.Interface())
+			}
+		}
+	}
+	return table, nil
+}
+
+// GobRegister register one struct to gob for cache use
+func (engine *Engine) GobRegister(v interface{}) *Engine {
+	gob.Register(v)
+	return engine
+}
+
+// Table pairs a mapped core.Table with the resolved table name.
+type Table struct {
+	*core.Table
+	Name string
+}
+
+// IsValid reports whether the table was mapped successfully and has a name.
+func (t *Table) IsValid() bool {
+	return t.Table != nil && len(t.Name) > 0
+}
+
+// TableInfo get table info according to bean's content.
+// NOTE(review): when mapping fails, the error is only logged and the
+// returned Table embeds a nil *core.Table — callers should check IsValid.
+func (engine *Engine) TableInfo(bean interface{}) *Table {
+	v := rValue(bean)
+	tb, err := engine.autoMapType(v)
+	if err != nil {
+		engine.logger.Error(err)
+	}
+	return &Table{tb, engine.TableName(bean)}
+}
+
+// addIndex registers col under the index named indexName on table, creating
+// the index (with the given type) when it does not exist yet, and records
+// the membership on the column as well.
+func addIndex(indexName string, table *core.Table, col *core.Column, indexType int) {
+	if index, ok := table.Indexes[indexName]; ok {
+		index.AddColumn(col.Name)
+		col.Indexes[index.Name] = indexType
+	} else {
+		index := core.NewIndex(indexName, indexType)
+		index.AddColumn(col.Name)
+		table.AddIndex(index)
+		col.Indexes[index.Name] = indexType
+	}
+}
+
+// TableName table name interface to define customize table name
+type TableName interface {
+	TableName() string
+}
+
+var (
+	// tpTableName is the reflect.Type of the TableName interface, used to
+	// detect beans that declare a custom table name.
+	tpTableName = reflect.TypeOf((*TableName)(nil)).Elem()
+)
+
+// mapType builds a core.Table describing struct value v: it walks the
+// struct fields, applies the tags found under engine.TagIdentifier
+// (column name, SQL type, indexes, EXTENDS, cache flags, ...), infers
+// column types for untagged fields, and finally promotes an int64 "ID"
+// field to an auto-increment primary key when no explicit primary key
+// was declared.
+func (engine *Engine) mapType(v reflect.Value) (*core.Table, error) {
+	t := v.Type()
+	table := core.NewEmptyTable()
+	table.Type = t
+	table.Name = engine.tbNameForMap(v)
+
+	var idFieldColName string
+	var hasCacheTag, hasNoCacheTag bool
+
+	for i := 0; i < t.NumField(); i++ {
+		tag := t.Field(i).Tag
+
+		ormTagStr := tag.Get(engine.TagIdentifier)
+		var col *core.Column
+		fieldValue := v.Field(i)
+		fieldType := fieldValue.Type()
+
+		if ormTagStr != "" {
+			col = &core.Column{FieldName: t.Field(i).Name, Nullable: true, IsPrimaryKey: false,
+				IsAutoIncrement: false, MapType: core.TWOSIDES, Indexes: make(map[string]int)}
+			tags := splitTag(ormTagStr)
+
+			if len(tags) > 0 {
+				// "-" means the field is not mapped at all.
+				if tags[0] == "-" {
+					continue
+				}
+
+				var ctx = tagContext{
+					table:      table,
+					col:        col,
+					fieldValue: fieldValue,
+					indexNames: make(map[string]int),
+					engine:     engine,
+				}
+
+				// EXTENDS embeds another struct's columns; an optional
+				// quoted parameter becomes a column-name prefix.
+				if strings.HasPrefix(strings.ToUpper(tags[0]), "EXTENDS") {
+					pStart := strings.Index(tags[0], "(")
+					if pStart > -1 && strings.HasSuffix(tags[0], ")") {
+						var tagPrefix = strings.TrimFunc(tags[0][pStart+1:len(tags[0])-1], func(r rune) bool {
+							return r == '\'' || r == '"'
+						})
+
+						ctx.params = []string{tagPrefix}
+					}
+
+					if err := ExtendsTagHandler(&ctx); err != nil {
+						return nil, err
+					}
+					continue
+				}
+
+				// Dispatch each remaining tag token to its handler; an
+				// unrecognized token is taken as the column name itself.
+				for j, key := range tags {
+					if ctx.ignoreNext {
+						ctx.ignoreNext = false
+						continue
+					}
+
+					k := strings.ToUpper(key)
+					ctx.tagName = k
+					ctx.params = []string{}
+
+					pStart := strings.Index(k, "(")
+					if pStart == 0 {
+						return nil, errors.New("( could not be the first charactor")
+					}
+					if pStart > -1 {
+						if !strings.HasSuffix(k, ")") {
+							return nil, fmt.Errorf("field %s tag %s cannot match ) charactor", col.FieldName, key)
+						}
+
+						// Split "NAME(p1,p2)" into the tag name and params.
+						ctx.tagName = k[:pStart]
+						ctx.params = strings.Split(key[pStart+1:len(k)-1], ",")
+					}
+
+					if j > 0 {
+						ctx.preTag = strings.ToUpper(tags[j-1])
+					}
+					if j < len(tags)-1 {
+						ctx.nextTag = tags[j+1]
+					} else {
+						ctx.nextTag = ""
+					}
+
+					if h, ok := engine.tagHandlers[ctx.tagName]; ok {
+						if err := h(&ctx); err != nil {
+							return nil, err
+						}
+					} else {
+						// Quoted token keeps its exact case as column name.
+						if strings.HasPrefix(key, "'") && strings.HasSuffix(key, "'") {
+							col.Name = key[1 : len(key)-1]
+						} else {
+							col.Name = key
+						}
+					}
+
+					if ctx.hasCacheTag {
+						hasCacheTag = true
+					}
+					if ctx.hasNoCacheTag {
+						hasNoCacheTag = true
+					}
+				}
+
+				// Fill in anything the tags did not specify explicitly.
+				if col.SQLType.Name == "" {
+					col.SQLType = core.Type2SQLType(fieldType)
+				}
+				engine.dialect.SqlType(col)
+				if col.Length == 0 {
+					col.Length = col.SQLType.DefaultLength
+				}
+				if col.Length2 == 0 {
+					col.Length2 = col.SQLType.DefaultLength2
+				}
+				if col.Name == "" {
+					col.Name = engine.ColumnMapper.Obj2Table(t.Field(i).Name)
+				}
+
+				if ctx.isUnique {
+					ctx.indexNames[col.Name] = core.UniqueType
+				} else if ctx.isIndex {
+					ctx.indexNames[col.Name] = core.IndexType
+				}
+
+				for indexName, indexType := range ctx.indexNames {
+					addIndex(indexName, table, col, indexType)
+				}
+			}
+		} else {
+			var sqlType core.SQLType
+			// NOTE(review): this addressable-value Conversion check is
+			// overwritten by the if/else below unless the value itself
+			// implements core.Conversion — looks unintended; preserved as-is.
+			if fieldValue.CanAddr() {
+				if _, ok := fieldValue.Addr().Interface().(core.Conversion); ok {
+					sqlType = core.SQLType{Name: core.Text}
+				}
+			}
+			if _, ok := fieldValue.Interface().(core.Conversion); ok {
+				sqlType = core.SQLType{Name: core.Text}
+			} else {
+				sqlType = core.Type2SQLType(fieldType)
+			}
+			col = core.NewColumn(engine.ColumnMapper.Obj2Table(t.Field(i).Name),
+				t.Field(i).Name, sqlType, sqlType.DefaultLength,
+				sqlType.DefaultLength2, true)
+
+			// Remember an int64 "ID" (or embedded "*.ID") field as a
+			// primary-key candidate for the fallback below.
+			if fieldType.Kind() == reflect.Int64 && (strings.ToUpper(col.FieldName) == "ID" || strings.HasSuffix(strings.ToUpper(col.FieldName), ".ID")) {
+				idFieldColName = col.Name
+			}
+		}
+		if col.IsAutoIncrement {
+			col.Nullable = false
+		}
+
+		table.AddColumn(col)
+
+	} // end for
+
+	// Fallback: no explicit primary key declared — use the int64 ID field.
+	if idFieldColName != "" && len(table.PrimaryKeys) == 0 {
+		col := table.GetColumn(idFieldColName)
+		col.IsPrimaryKey = true
+		col.IsAutoIncrement = true
+		col.Nullable = false
+		table.PrimaryKeys = append(table.PrimaryKeys, col.Name)
+		table.AutoIncrement = col.Name
+	}
+
+	if hasCacheTag {
+		if engine.Cacher != nil { // !nash! use engine's cacher if provided
+			engine.logger.Info("enable cache on table:", table.Name)
+			engine.setCacher(table.Name, engine.Cacher)
+		} else {
+			engine.logger.Info("enable LRU cache on table:", table.Name)
+			engine.setCacher(table.Name, NewLRUCacher2(NewMemoryStore(), time.Hour, 10000))
+		}
+	}
+	if hasNoCacheTag {
+		engine.logger.Info("disable cache on table:", table.Name)
+		engine.setCacher(table.Name, nil)
+	}
+
+	return table, nil
+}
+
+// IsTableEmpty reports whether a table has any record
+func (engine *Engine) IsTableEmpty(bean interface{}) (bool, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.IsTableEmpty(bean)
+}
+
+// IsTableExist reports whether a table exists
+func (engine *Engine) IsTableExist(beanOrTableName interface{}) (bool, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.IsTableExist(beanOrTableName)
+}
+
+// IdOf get id from one struct
+//
+// Deprecated: use IDOf instead.
+func (engine *Engine) IdOf(bean interface{}) core.PK {
+	return engine.IDOf(bean)
+}
+
+// IDOf get id from one struct
+func (engine *Engine) IDOf(bean interface{}) core.PK {
+	return engine.IdOfV(reflect.ValueOf(bean))
+}
+
+// IdOfV get id from one value of struct
+//
+// Deprecated: use IDOfV instead.
+func (engine *Engine) IdOfV(rv reflect.Value) core.PK {
+	return engine.IDOfV(rv)
+}
+
+// IDOfV get id from one value of struct.
+// Errors are logged and reported as a nil PK.
+func (engine *Engine) IDOfV(rv reflect.Value) core.PK {
+	pk, err := engine.idOfV(rv)
+	if err != nil {
+		engine.logger.Error(err)
+		return nil
+	}
+	return pk
+}
+
+// idOfV extracts the primary-key values from struct value rv. Dotted
+// FieldNames (e.g. "Embedded.ID") are followed through embedded structs;
+// each value is converted to the Go type matching its column via
+// idTypeAssertion.
+func (engine *Engine) idOfV(rv reflect.Value) (core.PK, error) {
+	v := reflect.Indirect(rv)
+	table, err := engine.autoMapType(v)
+	if err != nil {
+		return nil, err
+	}
+
+	pk := make([]interface{}, len(table.PrimaryKeys))
+	for i, col := range table.PKColumns() {
+		var err error
+
+		// Walk "A.B.C" one segment at a time into nested structs.
+		fieldName := col.FieldName
+		for {
+			parts := strings.SplitN(fieldName, ".", 2)
+			if len(parts) == 1 {
+				break
+			}
+
+			v = v.FieldByName(parts[0])
+			if v.Kind() == reflect.Ptr {
+				v = v.Elem()
+			}
+			if v.Kind() != reflect.Struct {
+				return nil, ErrUnSupportedType
+			}
+			fieldName = parts[1]
+		}
+
+		pkField := v.FieldByName(fieldName)
+		switch pkField.Kind() {
+		case reflect.String:
+			pk[i], err = engine.idTypeAssertion(col, pkField.String())
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			pk[i], err = engine.idTypeAssertion(col, strconv.FormatInt(pkField.Int(), 10))
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+			// id of uint will be converted to int64
+			pk[i], err = engine.idTypeAssertion(col, strconv.FormatUint(pkField.Uint(), 10))
+		}
+
+		if err != nil {
+			return nil, err
+		}
+	}
+	return core.PK(pk), nil
+}
+
+// idTypeAssertion converts the string form of a primary-key value into the
+// Go value matching the column's SQL type: int64 for numeric columns,
+// string for text columns. Other column types are rejected.
+func (engine *Engine) idTypeAssertion(col *core.Column, sid string) (interface{}, error) {
+	switch {
+	case col.SQLType.IsNumeric():
+		n, err := strconv.ParseInt(sid, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		return n, nil
+	case col.SQLType.IsText():
+		return sid, nil
+	default:
+		return nil, errors.New("not supported")
+	}
+}
+
+// CreateIndexes creates the indexes declared on the bean's struct tags.
+func (engine *Engine) CreateIndexes(bean interface{}) error {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.CreateIndexes(bean)
+}
+
+// CreateUniques creates the unique constraints declared on the bean's struct tags.
+func (engine *Engine) CreateUniques(bean interface{}) error {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.CreateUniques(bean)
+}
+
+// ClearCacheBean clears the cached bean with the given id for the bean's
+// table. It is a no-op when no cacher is configured for the table.
+func (engine *Engine) ClearCacheBean(bean interface{}, id string) error {
+	tableName := engine.TableName(bean)
+	cacher := engine.getCacher(tableName)
+	if cacher != nil {
+		cacher.ClearIds(tableName)
+		cacher.DelBean(tableName, id)
+	}
+	return nil
+}
+
+// ClearCache clears all cached ids and beans for the given beans' tables.
+// Tables without a configured cacher are skipped.
+func (engine *Engine) ClearCache(beans ...interface{}) error {
+	for _, bean := range beans {
+		tableName := engine.TableName(bean)
+		cacher := engine.getCacher(tableName)
+		if cacher != nil {
+			cacher.ClearIds(tableName)
+			cacher.ClearBeans(tableName)
+		}
+	}
+	return nil
+}
+
+// Sync the new struct changes to database, this method will automatically add
+// table, column, index, unique. but will not delete or change anything.
+// If you change some field, you should change the database manually.
+func (engine *Engine) Sync(beans ...interface{}) error {
+ session := engine.NewSession()
+ defer session.Close()
+
+ for _, bean := range beans {
+ v := rValue(bean)
+ tableNameNoSchema := engine.TableName(bean)
+ table, err := engine.autoMapType(v)
+ if err != nil {
+ return err
+ }
+
+ isExist, err := session.Table(bean).isTableExist(tableNameNoSchema)
+ if err != nil {
+ return err
+ }
+ if !isExist {
+ err = session.createTable(bean)
+ if err != nil {
+ return err
+ }
+ }
+ /*isEmpty, err := engine.IsEmptyTable(bean)
+ if err != nil {
+ return err
+ }*/
+ var isEmpty bool
+ if isEmpty {
+ err = session.dropTable(bean)
+ if err != nil {
+ return err
+ }
+ err = session.createTable(bean)
+ if err != nil {
+ return err
+ }
+ } else {
+ for _, col := range table.Columns() {
+ isExist, err := engine.dialect.IsColumnExist(tableNameNoSchema, col.Name)
+ if err != nil {
+ return err
+ }
+ if !isExist {
+ if err := session.statement.setRefBean(bean); err != nil {
+ return err
+ }
+ err = session.addColumn(col.Name)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ for name, index := range table.Indexes {
+ if err := session.statement.setRefBean(bean); err != nil {
+ return err
+ }
+ if index.Type == core.UniqueType {
+ isExist, err := session.isIndexExist2(tableNameNoSchema, index.Cols, true)
+ if err != nil {
+ return err
+ }
+ if !isExist {
+ if err := session.statement.setRefBean(bean); err != nil {
+ return err
+ }
+
+ err = session.addUnique(tableNameNoSchema, name)
+ if err != nil {
+ return err
+ }
+ }
+ } else if index.Type == core.IndexType {
+ isExist, err := session.isIndexExist2(tableNameNoSchema, index.Cols, false)
+ if err != nil {
+ return err
+ }
+ if !isExist {
+ if err := session.statement.setRefBean(bean); err != nil {
+ return err
+ }
+
+ err = session.addIndex(tableNameNoSchema, name)
+ if err != nil {
+ return err
+ }
+ }
+ } else {
+ return errors.New("unknow index type")
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// Sync2 synchronize structs to database tables
+func (engine *Engine) Sync2(beans ...interface{}) error {
+	s := engine.NewSession()
+	defer s.Close()
+	return s.Sync2(beans...)
+}
+
+// CreateTables creates tables for the given beans inside a single
+// transaction; the first failure rolls back and is returned.
+func (engine *Engine) CreateTables(beans ...interface{}) error {
+	session := engine.NewSession()
+	defer session.Close()
+
+	err := session.Begin()
+	if err != nil {
+		return err
+	}
+
+	for _, bean := range beans {
+		err = session.createTable(bean)
+		if err != nil {
+			// The rollback error is ignored; the create error is returned.
+			session.Rollback()
+			return err
+		}
+	}
+	return session.Commit()
+}
+
+// DropTables drops the given beans' tables inside a single transaction;
+// the first failure rolls back and is returned.
+func (engine *Engine) DropTables(beans ...interface{}) error {
+	session := engine.NewSession()
+	defer session.Close()
+
+	err := session.Begin()
+	if err != nil {
+		return err
+	}
+
+	for _, bean := range beans {
+		err = session.dropTable(bean)
+		if err != nil {
+			// The rollback error is ignored; the drop error is returned.
+			session.Rollback()
+			return err
+		}
+	}
+	return session.Commit()
+}
+
+// DropIndexes drop indexes of a table
+func (engine *Engine) DropIndexes(bean interface{}) error {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.DropIndexes(bean)
+}
+
+// Exec raw sql
+func (engine *Engine) Exec(sqlOrArgs ...interface{}) (sql.Result, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Exec(sqlOrArgs...)
+}
+
+// Query a raw sql and return records as []map[string][]byte
+func (engine *Engine) Query(sqlOrArgs ...interface{}) (resultsSlice []map[string][]byte, err error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Query(sqlOrArgs...)
+}
+
+// QueryString runs a raw sql and return records as []map[string]string
+func (engine *Engine) QueryString(sqlOrArgs ...interface{}) ([]map[string]string, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.QueryString(sqlOrArgs...)
+}
+
+// QueryInterface runs a raw sql and return records as []map[string]interface{}
+func (engine *Engine) QueryInterface(sqlOrArgs ...interface{}) ([]map[string]interface{}, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.QueryInterface(sqlOrArgs...)
+}
+
+// Insert one or more records
+func (engine *Engine) Insert(beans ...interface{}) (int64, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Insert(beans...)
+}
+
+// InsertOne insert only one record
+func (engine *Engine) InsertOne(bean interface{}) (int64, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.InsertOne(bean)
+}
+
+// Update records: bean's non-empty fields are the updated contents,
+// condiBeans' non-empty fields are the conditions.
+// CAUTION:
+//        1. bool fields are treated as update content by default, not as
+//           conditions. Call UseBool if you have bools to use.
+//        2. float32 & float64 may be inexact as conditions
+func (engine *Engine) Update(bean interface{}, condiBeans ...interface{}) (int64, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Update(bean, condiBeans...)
+}
+
+// Delete records, bean's non-empty fields are conditions
+func (engine *Engine) Delete(bean interface{}) (int64, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Delete(bean)
+}
+
+// Get retrieve one record from table, bean's non-empty fields
+// are conditions
+func (engine *Engine) Get(bean interface{}) (bool, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Get(bean)
+}
+
+// Exist returns true if the record exist otherwise return false
+func (engine *Engine) Exist(bean ...interface{}) (bool, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Exist(bean...)
+}
+
+// Find retrieve records from table, condiBeans's non-empty fields
+// are conditions. beans could be []Struct, []*Struct, map[int64]Struct
+// map[int64]*Struct
+func (engine *Engine) Find(beans interface{}, condiBeans ...interface{}) error {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Find(beans, condiBeans...)
+}
+
+// FindAndCount find the results and also return the counts
+func (engine *Engine) FindAndCount(rowsSlicePtr interface{}, condiBean ...interface{}) (int64, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.FindAndCount(rowsSlicePtr, condiBean...)
+}
+
+// Iterate record by record handle records from table, bean's non-empty fields
+// are conditions.
+func (engine *Engine) Iterate(bean interface{}, fun IterFunc) error {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Iterate(bean, fun)
+}
+
+// Rows return sql.Rows compatible Rows obj, as a forward Iterator object for iterating record by record, bean's non-empty fields
+// are conditions.
+// NOTE(review): the session is intentionally not closed here — presumably
+// the returned Rows takes ownership and closes it; verify in Session.Rows.
+func (engine *Engine) Rows(bean interface{}) (*Rows, error) {
+	session := engine.NewSession()
+	return session.Rows(bean)
+}
+
+// Count counts the records. bean's non-empty fields are conditions.
+func (engine *Engine) Count(bean ...interface{}) (int64, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Count(bean...)
+}
+
+// Sum sum the records by some column. bean's non-empty fields are conditions.
+func (engine *Engine) Sum(bean interface{}, colName string) (float64, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Sum(bean, colName)
+}
+
+// SumInt sum the records by some column. bean's non-empty fields are conditions.
+func (engine *Engine) SumInt(bean interface{}, colName string) (int64, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.SumInt(bean, colName)
+}
+
+// Sums sum the records by some columns. bean's non-empty fields are conditions.
+func (engine *Engine) Sums(bean interface{}, colNames ...string) ([]float64, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Sums(bean, colNames...)
+}
+
+// SumsInt like Sums but return slice of int64 instead of float64.
+func (engine *Engine) SumsInt(bean interface{}, colNames ...string) ([]int64, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.SumsInt(bean, colNames...)
+}
+
+// ImportFile opens the SQL DDL file at ddlPath and executes its statements
+// via Import, returning one sql.Result per executed statement.
+func (engine *Engine) ImportFile(ddlPath string) ([]sql.Result, error) {
+	f, err := os.Open(ddlPath)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return engine.Import(f)
+}
+
+// Import executes the semicolon-separated SQL DDL statements read from r
+// and returns the result of each executed statement. Execution stops at
+// the first statement that fails; any error from reading the input itself
+// is also reported.
+func (engine *Engine) Import(r io.Reader) ([]sql.Result, error) {
+	var results []sql.Result
+	scanner := bufio.NewScanner(r)
+
+	// Split the stream on ';' so each token is one SQL statement; a
+	// trailing non-terminated statement at EOF is returned as-is.
+	semiColSpliter := func(data []byte, atEOF bool) (advance int, token []byte, err error) {
+		if atEOF && len(data) == 0 {
+			return 0, nil, nil
+		}
+		if i := bytes.IndexByte(data, ';'); i >= 0 {
+			return i + 1, data[0:i], nil
+		}
+		// If we're at EOF, we have a final, non-terminated line. Return it.
+		if atEOF {
+			return len(data), data, nil
+		}
+		// Request more data.
+		return 0, nil, nil
+	}
+
+	scanner.Split(semiColSpliter)
+
+	for scanner.Scan() {
+		query := strings.Trim(scanner.Text(), " \t\n\r")
+		if len(query) > 0 {
+			engine.logSQL(query)
+			result, err := engine.DB().Exec(query)
+			results = append(results, result)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	// Surface any read error from the scanner. Previously this returned a
+	// never-assigned lastError variable, silently dropping read failures.
+	return results, scanner.Err()
+}
+
+// nowTime returns the current time twice: formatted for the database
+// (in the column's time zone, falling back to the engine's DatabaseTZ)
+// and as a time.Time in the application's TZLocation.
+func (engine *Engine) nowTime(col *core.Column) (interface{}, time.Time) {
+	t := time.Now()
+	var tz = engine.DatabaseTZ
+	if !col.DisableTimeZone && col.TimeZone != nil {
+		tz = col.TimeZone
+	}
+	return engine.formatTime(col.SQLType.Name, t.In(tz)), t.In(engine.TZLocation)
+}
+
+// formatColTime formats t for storage in col. A zero time becomes NULL for
+// nullable columns and the empty string otherwise; the column's own time
+// zone takes precedence over the engine's DatabaseTZ.
+func (engine *Engine) formatColTime(col *core.Column, t time.Time) (v interface{}) {
+	if t.IsZero() {
+		if col.Nullable {
+			return nil
+		}
+		return ""
+	}
+
+	if col.TimeZone != nil {
+		return engine.formatTime(col.SQLType.Name, t.In(col.TimeZone))
+	}
+	return engine.formatTime(col.SQLType.Name, t.In(engine.DatabaseTZ))
+}
+
+// formatTime format time as column type
+func (engine *Engine) formatTime(sqlTypeName string, t time.Time) (v interface{}) {
+	switch sqlTypeName {
+	case core.Time:
+		// Keep only the "HH:MM:SS" portion.
+		s := t.Format("2006-01-02 15:04:05") // time.RFC3339
+		v = s[11:19]
+	case core.Date:
+		v = t.Format("2006-01-02")
+	case core.DateTime, core.TimeStamp:
+		v = t.Format("2006-01-02 15:04:05")
+	case core.TimeStampz:
+		// MSSQL needs its own zoned timestamp layout.
+		if engine.dialect.DBType() == core.MSSQL {
+			v = t.Format("2006-01-02T15:04:05.9999999Z07:00")
+		} else {
+			v = t.Format(time.RFC3339Nano)
+		}
+	case core.BigInt, core.Int:
+		// Integer columns store the Unix timestamp.
+		v = t.Unix()
+	default:
+		v = t
+	}
+	return
+}
+
+// GetColumnMapper returns the column name mapper
+func (engine *Engine) GetColumnMapper() core.IMapper {
+	return engine.ColumnMapper
+}
+
+// GetTableMapper returns the table name mapper
+func (engine *Engine) GetTableMapper() core.IMapper {
+	return engine.TableMapper
+}
+
+// GetTZLocation returns time zone of the application
+func (engine *Engine) GetTZLocation() *time.Location {
+	return engine.TZLocation
+}
+
+// SetTZLocation sets time zone of the application
+func (engine *Engine) SetTZLocation(tz *time.Location) {
+	engine.TZLocation = tz
+}
+
+// GetTZDatabase returns time zone of the database
+func (engine *Engine) GetTZDatabase() *time.Location {
+	return engine.DatabaseTZ
+}
+
+// SetTZDatabase sets time zone of the database
+func (engine *Engine) SetTZDatabase(tz *time.Location) {
+	engine.DatabaseTZ = tz
+}
+
+// SetSchema sets the schema of database
+func (engine *Engine) SetSchema(schema string) {
+	engine.dialect.URI().Schema = schema
+}
+
+// Unscoped always disable struct tag "deleted"
+func (engine *Engine) Unscoped() *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Unscoped()
+}
diff --git a/vendor/github.com/go-xorm/xorm/engine_cond.go b/vendor/github.com/go-xorm/xorm/engine_cond.go
new file mode 100644
index 0000000..702ac80
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/engine_cond.go
@@ -0,0 +1,232 @@
+// Copyright 2017 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "database/sql/driver"
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+
+ "xorm.io/builder"
+ "xorm.io/core"
+)
+
+// buildConds walks every column of table, derives a SQL equality condition from
+// the corresponding field of bean, and ANDs the results together. The boolean
+// flags control which column classes participate (version/updated/auto-increment
+// columns, nil pointers via includeNil, bools via allUseBool, zero values via
+// useAllCols or mustColumnMap); unscoped disables the soft-delete filter.
+func (engine *Engine) buildConds(table *core.Table, bean interface{},
+	includeVersion bool, includeUpdated bool, includeNil bool,
+	includeAutoIncr bool, allUseBool bool, useAllCols bool, unscoped bool,
+	mustColumnMap map[string]bool, tableName, aliasName string, addedTableName bool) (builder.Cond, error) {
+	var conds []builder.Cond
+	for _, col := range table.Columns() {
+		if !includeVersion && col.IsVersion {
+			continue
+		}
+		if !includeUpdated && col.IsUpdated {
+			continue
+		}
+		if !includeAutoIncr && col.IsAutoIncrement {
+			continue
+		}
+
+		// MSSQL text/blob/timestampz columns are excluded from equality conditions.
+		if engine.dialect.DBType() == core.MSSQL && (col.SQLType.Name == core.Text || col.SQLType.IsBlob() || col.SQLType.Name == core.TimeStampz) {
+			continue
+		}
+		if col.SQLType.IsJson() {
+			continue
+		}
+
+		// Qualify the column with the table (or alias) name when requested.
+		var colName string
+		if addedTableName {
+			var nm = tableName
+			if len(aliasName) > 0 {
+				nm = aliasName
+			}
+			colName = engine.Quote(nm) + "." + engine.Quote(col.Name)
+		} else {
+			colName = engine.Quote(col.Name)
+		}
+
+		fieldValuePtr, err := col.ValueOf(bean)
+		if err != nil {
+			// "is not valid" just means the field is absent on this bean; anything else is worth a warning.
+			if !strings.Contains(err.Error(), "is not valid") {
+				engine.logger.Warn(err)
+			}
+			continue
+		}
+
+		if col.IsDeleted && !unscoped { // tag "deleted" is enabled
+			conds = append(conds, engine.CondDeleted(colName))
+		}
+
+		fieldValue := *fieldValuePtr
+		if fieldValue.Interface() == nil {
+			continue
+		}
+
+		fieldType := reflect.TypeOf(fieldValue.Interface())
+		requiredField := useAllCols
+
+		// mustColumnMap overrides: true forces the column in, false forces it out.
+		if b, ok := getFlagForColumn(mustColumnMap, col); ok {
+			if b {
+				requiredField = true
+			} else {
+				continue
+			}
+		}
+
+		if fieldType.Kind() == reflect.Ptr {
+			if fieldValue.IsNil() {
+				if includeNil {
+					conds = append(conds, builder.Eq{colName: nil})
+				}
+				continue
+			} else if !fieldValue.IsValid() {
+				continue
+			} else {
+				// dereference ptr type to instance type; a non-nil pointer always participates
+				fieldValue = fieldValue.Elem()
+				fieldType = reflect.TypeOf(fieldValue.Interface())
+				requiredField = true
+			}
+		}
+
+		var val interface{}
+		switch fieldType.Kind() {
+		case reflect.Bool:
+			if allUseBool || requiredField {
+				val = fieldValue.Interface()
+			} else {
+				// a bool in a struct will not be used as a condition because its default is false;
+				// please use Where() instead
+				continue
+			}
+		case reflect.String:
+			if !requiredField && fieldValue.String() == "" {
+				continue
+			}
+			// for a named string type (e.g. type MyString string), convert to plain string
+			if fieldType.String() != reflect.String.String() {
+				val = fieldValue.String()
+			} else {
+				val = fieldValue.Interface()
+			}
+		case reflect.Int8, reflect.Int16, reflect.Int, reflect.Int32, reflect.Int64:
+			if !requiredField && fieldValue.Int() == 0 {
+				continue
+			}
+			val = fieldValue.Interface()
+		case reflect.Float32, reflect.Float64:
+			if !requiredField && fieldValue.Float() == 0.0 {
+				continue
+			}
+			val = fieldValue.Interface()
+		case reflect.Uint8, reflect.Uint16, reflect.Uint, reflect.Uint32, reflect.Uint64:
+			if !requiredField && fieldValue.Uint() == 0 {
+				continue
+			}
+			// unsigned values are passed down as *int64 — presumably for driver compatibility; TODO confirm
+			t := int64(fieldValue.Uint())
+			val = reflect.ValueOf(&t).Interface()
+		case reflect.Struct:
+			if fieldType.ConvertibleTo(core.TimeType) {
+				t := fieldValue.Convert(core.TimeType).Interface().(time.Time)
+				if !requiredField && (t.IsZero() || !fieldValue.IsValid()) {
+					continue
+				}
+				val = engine.formatColTime(col, t)
+			} else if _, ok := reflect.New(fieldType).Interface().(core.Conversion); ok {
+				continue
+			} else if valNul, ok := fieldValue.Interface().(driver.Valuer); ok {
+				val, _ = valNul.Value()
+				if val == nil {
+					continue
+				}
+			} else {
+				// NOTE(review): JSON columns are already skipped at the top of the
+				// loop, so this IsJson branch looks unreachable — confirm before relying on it.
+				if col.SQLType.IsJson() {
+					if col.SQLType.IsText() {
+						bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface())
+						if err != nil {
+							engine.logger.Error(err)
+							continue
+						}
+						val = string(bytes)
+					} else if col.SQLType.IsBlob() {
+						var bytes []byte
+						var err error
+						bytes, err = DefaultJSONHandler.Marshal(fieldValue.Interface())
+						if err != nil {
+							engine.logger.Error(err)
+							continue
+						}
+						val = bytes
+					}
+				} else {
+					// Embedded struct: if it maps to a table with a single PK, compare on the PK value.
+					engine.autoMapType(fieldValue)
+					if table, ok := engine.Tables[fieldValue.Type()]; ok {
+						if len(table.PrimaryKeys) == 1 {
+							pkField := reflect.Indirect(fieldValue).FieldByName(table.PKColumns()[0].FieldName)
+							// fix non-int pk issues
+							//if pkField.Int() != 0 {
+							if pkField.IsValid() && !isZero(pkField.Interface()) {
+								val = pkField.Interface()
+							} else {
+								continue
+							}
+						} else {
+							//TODO: how to handle composite primary keys here?
+							return nil, fmt.Errorf("not supported %v as %v", fieldValue.Interface(), table.PrimaryKeys)
+						}
+					} else {
+						val = fieldValue.Interface()
+					}
+				}
+			}
+		case reflect.Array:
+			continue
+		case reflect.Slice, reflect.Map:
+			if fieldValue == reflect.Zero(fieldType) {
+				continue
+			}
+			if fieldValue.IsNil() || !fieldValue.IsValid() || fieldValue.Len() == 0 {
+				continue
+			}
+
+			// Slices/maps are serialized as JSON into text or blob columns; anything else is skipped.
+			if col.SQLType.IsText() {
+				bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface())
+				if err != nil {
+					engine.logger.Error(err)
+					continue
+				}
+				val = string(bytes)
+			} else if col.SQLType.IsBlob() {
+				var bytes []byte
+				var err error
+				if (fieldType.Kind() == reflect.Array || fieldType.Kind() == reflect.Slice) &&
+					fieldType.Elem().Kind() == reflect.Uint8 {
+					if fieldValue.Len() > 0 {
+						val = fieldValue.Bytes()
+					} else {
+						continue
+					}
+				} else {
+					bytes, err = DefaultJSONHandler.Marshal(fieldValue.Interface())
+					if err != nil {
+						engine.logger.Error(err)
+						continue
+					}
+					val = bytes
+				}
+			} else {
+				continue
+			}
+		default:
+			val = fieldValue.Interface()
+		}
+
+		conds = append(conds, builder.Eq{colName: val})
+	}
+
+	return builder.And(conds...), nil
+}
diff --git a/vendor/github.com/go-xorm/xorm/engine_context.go b/vendor/github.com/go-xorm/xorm/engine_context.go
new file mode 100644
index 0000000..c6cbb76
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/engine_context.go
@@ -0,0 +1,28 @@
+// Copyright 2019 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package xorm
+
+import "context"
+
+// Context creates an auto-closing session bound to the given context.
+func (engine *Engine) Context(ctx context.Context) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Context(ctx)
+}
+
+// SetDefaultContext sets the engine's default context.
+func (engine *Engine) SetDefaultContext(ctx context.Context) {
+	engine.defaultContext = ctx
+}
+
+// PingContext tests if the database is alive, honoring ctx for cancellation.
+func (engine *Engine) PingContext(ctx context.Context) error {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.PingContext(ctx)
+}
diff --git a/vendor/github.com/go-xorm/xorm/engine_group.go b/vendor/github.com/go-xorm/xorm/engine_group.go
new file mode 100644
index 0000000..42d49ec
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/engine_group.go
@@ -0,0 +1,219 @@
+// Copyright 2017 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "context"
+ "time"
+
+ "xorm.io/core"
+)
+
+// EngineGroup defines a master/slave group of engines; the embedded Engine is
+// the master and also supplies the group's methods.
+type EngineGroup struct {
+	*Engine            // master engine
+	slaves []*Engine   // slave engines, selected from by Slave()
+	policy GroupPolicy // strategy used by Slave() to pick a slave
+}
+
+// NewEngineGroup creates an engine group from either a driver name plus a list
+// of DSN strings (the first becomes the master, the rest slaves), or an
+// existing master *Engine plus a slice of slave engines. An optional policy
+// chooses among slaves; the default is round-robin. Any other argument
+// combination yields ErrParamsType.
+func NewEngineGroup(args1 interface{}, args2 interface{}, policies ...GroupPolicy) (*EngineGroup, error) {
+	var group EngineGroup
+	if len(policies) == 0 {
+		group.policy = RoundRobinPolicy()
+	} else {
+		group.policy = policies[0]
+	}
+
+	// Form 1: (driverName string, dataSourceNames []string)
+	if driver, isStr := args1.(string); isStr {
+		if sources, isSlice := args2.([]string); isSlice {
+			engines := make([]*Engine, len(sources))
+			for i, source := range sources {
+				e, err := NewEngine(driver, source)
+				if err != nil {
+					return nil, err
+				}
+				e.engineGroup = &group
+				engines[i] = e
+			}
+			group.Engine = engines[0]
+			group.slaves = engines[1:]
+			return &group, nil
+		}
+	}
+
+	// Form 2: (master *Engine, slaves []*Engine)
+	if master, isEngine := args1.(*Engine); isEngine {
+		if slaves, isSlice := args2.([]*Engine); isSlice {
+			master.engineGroup = &group
+			for _, s := range slaves {
+				s.engineGroup = &group
+			}
+			group.Engine = master
+			group.slaves = slaves
+			return &group, nil
+		}
+	}
+	return nil, ErrParamsType
+}
+
+// Close closes the master engine and then every slave, returning the first
+// error encountered.
+func (eg *EngineGroup) Close() error {
+	if err := eg.Engine.Close(); err != nil {
+		return err
+	}
+	for _, slave := range eg.slaves {
+		if err := slave.Close(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Context returns an auto-closing group session bound to ctx.
+func (eg *EngineGroup) Context(ctx context.Context) *Session {
+	sess := eg.NewSession()
+	sess.isAutoClose = true
+	return sess.Context(ctx)
+}
+
+// NewSession returns a session whose sessionType is marked groupSession.
+func (eg *EngineGroup) NewSession() *Session {
+	sess := eg.Engine.NewSession()
+	sess.sessionType = groupSession
+	return sess
+}
+
+// Master returns the master engine.
+func (eg *EngineGroup) Master() *Engine {
+	return eg.Engine
+}
+
+// Ping verifies that the master and then every slave database is alive,
+// returning the first error encountered.
+func (eg *EngineGroup) Ping() error {
+	engines := append([]*Engine{eg.Engine}, eg.slaves...)
+	for _, e := range engines {
+		if err := e.Ping(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// SetColumnMapper sets the column name mapping rule on the master and every slave.
+func (eg *EngineGroup) SetColumnMapper(mapper core.IMapper) {
+	eg.Engine.ColumnMapper = mapper
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].ColumnMapper = mapper
+	}
+}
+
+// SetConnMaxLifetime sets the maximum amount of time a connection may be
+// reused, on the master and every slave.
+func (eg *EngineGroup) SetConnMaxLifetime(d time.Duration) {
+	eg.Engine.SetConnMaxLifetime(d)
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].SetConnMaxLifetime(d)
+	}
+}
+
+// SetDefaultCacher sets the default cacher on the master and every slave.
+func (eg *EngineGroup) SetDefaultCacher(cacher core.Cacher) {
+	eg.Engine.SetDefaultCacher(cacher)
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].SetDefaultCacher(cacher)
+	}
+}
+
+// SetLogger sets the logger on the master and every slave.
+func (eg *EngineGroup) SetLogger(logger core.ILogger) {
+	eg.Engine.SetLogger(logger)
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].SetLogger(logger)
+	}
+}
+
+// SetLogLevel sets the logger level on the master and every slave.
+func (eg *EngineGroup) SetLogLevel(level core.LogLevel) {
+	eg.Engine.SetLogLevel(level)
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].SetLogLevel(level)
+	}
+}
+
+// SetMapper sets the name mapping rules on the master and every slave.
+func (eg *EngineGroup) SetMapper(mapper core.IMapper) {
+	eg.Engine.SetMapper(mapper)
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].SetMapper(mapper)
+	}
+}
+
+// SetMaxIdleConns sets the max idle connections on every pool; the default is 2.
+func (eg *EngineGroup) SetMaxIdleConns(conns int) {
+	eg.Engine.db.SetMaxIdleConns(conns)
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].db.SetMaxIdleConns(conns)
+	}
+}
+
+// SetMaxOpenConns sets the max open connections on every pool (only available for go 1.2+).
+func (eg *EngineGroup) SetMaxOpenConns(conns int) {
+	eg.Engine.db.SetMaxOpenConns(conns)
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].db.SetMaxOpenConns(conns)
+	}
+}
+
+// SetPolicy sets the slave-selection policy and returns the group for chaining.
+func (eg *EngineGroup) SetPolicy(policy GroupPolicy) *EngineGroup {
+	eg.policy = policy
+	return eg
+}
+
+// SetTableMapper sets the table name mapping rule on the master and every slave.
+func (eg *EngineGroup) SetTableMapper(mapper core.IMapper) {
+	eg.Engine.TableMapper = mapper
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].TableMapper = mapper
+	}
+}
+
+// ShowExecTime toggles logging of SQL statements and execution time on the
+// master and every slave (shown if the log level is greater than INFO).
+func (eg *EngineGroup) ShowExecTime(show ...bool) {
+	eg.Engine.ShowExecTime(show...)
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].ShowExecTime(show...)
+	}
+}
+
+// ShowSQL toggles logging of SQL statements on the master and every slave
+// (shown if the log level is greater than INFO).
+func (eg *EngineGroup) ShowSQL(show ...bool) {
+	eg.Engine.ShowSQL(show...)
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].ShowSQL(show...)
+	}
+}
+
+// Slave picks one physical database for the caller according to the policy.
+// With no slaves configured the master itself is returned; with exactly one
+// slave the policy is bypassed.
+func (eg *EngineGroup) Slave() *Engine {
+	if len(eg.slaves) == 0 {
+		return eg.Engine
+	}
+	if len(eg.slaves) == 1 {
+		return eg.slaves[0]
+	}
+	return eg.policy.Slave(eg)
+}
+
+// Slaves returns all the slave engines (the underlying slice, not a copy).
+func (eg *EngineGroup) Slaves() []*Engine {
+	return eg.slaves
+}
diff --git a/vendor/github.com/go-xorm/xorm/engine_group_policy.go b/vendor/github.com/go-xorm/xorm/engine_group_policy.go
new file mode 100644
index 0000000..5b56e89
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/engine_group_policy.go
@@ -0,0 +1,116 @@
+// Copyright 2017 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "math/rand"
+ "sync"
+ "time"
+)
+
+// GroupPolicy chooses which slave engine of a group a call should go to.
+type GroupPolicy interface {
+	Slave(*EngineGroup) *Engine
+}
+
+// GroupPolicyHandler adapts a plain function into a GroupPolicy.
+type GroupPolicyHandler func(*EngineGroup) *Engine
+
+// Slave implements GroupPolicy by invoking the function itself.
+func (h GroupPolicyHandler) Slave(eg *EngineGroup) *Engine {
+	return h(eg)
+}
+
+// RandomPolicy implements choosing a slave uniformly at random.
+func RandomPolicy() GroupPolicyHandler {
+	var r = rand.New(rand.NewSource(time.Now().UnixNano()))
+	return func(g *EngineGroup) *Engine {
+		return g.Slaves()[r.Intn(len(g.Slaves()))]
+	}
+}
+
+// WeightRandomPolicy implements choosing a slave at random, where weights[i]
+// is the relative chance of slave i being picked.
+// NOTE(review): rands stays empty when every weight is <= 0, making Intn panic — confirm callers guard against that.
+func WeightRandomPolicy(weights []int) GroupPolicyHandler {
+	var rands = make([]int, 0, len(weights))
+	for i := 0; i < len(weights); i++ {
+		for n := 0; n < weights[i]; n++ {
+			rands = append(rands, i)
+		}
+	}
+	var r = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+	return func(g *EngineGroup) *Engine {
+		var slaves = g.Slaves()
+		idx := rands[r.Intn(len(rands))]
+		// Clamp in case there are more weights than slaves.
+		if idx >= len(slaves) {
+			idx = len(slaves) - 1
+		}
+		return slaves[idx]
+	}
+}
+
+// RoundRobinPolicy returns a policy that cycles through the slaves in order;
+// the shared position is mutex-protected so the closure is safe for concurrent use.
+func RoundRobinPolicy() GroupPolicyHandler {
+	var pos = -1
+	var lock sync.Mutex
+	return func(g *EngineGroup) *Engine {
+		var slaves = g.Slaves()
+
+		lock.Lock()
+		defer lock.Unlock()
+		pos++
+		if pos >= len(slaves) {
+			pos = 0
+		}
+
+		return slaves[pos]
+	}
+}
+
+// WeightRoundRobinPolicy returns a policy that cycles deterministically through
+// a weighted expansion of the slave indexes (slave i appears weights[i] times).
+// NOTE(review): rands stays empty when every weight is <= 0, making rands[pos] panic — confirm callers guard against that.
+func WeightRoundRobinPolicy(weights []int) GroupPolicyHandler {
+	var rands = make([]int, 0, len(weights))
+	for i := 0; i < len(weights); i++ {
+		for n := 0; n < weights[i]; n++ {
+			rands = append(rands, i)
+		}
+	}
+	var pos = -1
+	var lock sync.Mutex
+
+	return func(g *EngineGroup) *Engine {
+		var slaves = g.Slaves()
+		lock.Lock()
+		defer lock.Unlock()
+		pos++
+		if pos >= len(rands) {
+			pos = 0
+		}
+
+		idx := rands[pos]
+		// Clamp in case there are more weights than slaves.
+		if idx >= len(slaves) {
+			idx = len(slaves) - 1
+		}
+		return slaves[idx]
+	}
+}
+
+// LeastConnPolicy implements GroupPolicy, every time will get the least connections slave
+func LeastConnPolicy() GroupPolicyHandler {
+ return func(g *EngineGroup) *Engine {
+ var slaves = g.Slaves()
+ connections := 0
+ idx := 0
+ for i := 0; i < len(slaves); i++ {
+ openConnections := slaves[i].DB().Stats().OpenConnections
+ if i == 0 {
+ connections = openConnections
+ idx = i
+ } else if openConnections <= connections {
+ connections = openConnections
+ idx = i
+ }
+ }
+ return slaves[idx]
+ }
+}
diff --git a/vendor/github.com/go-xorm/xorm/engine_table.go b/vendor/github.com/go-xorm/xorm/engine_table.go
new file mode 100644
index 0000000..eb5aa85
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/engine_table.go
@@ -0,0 +1,113 @@
+// Copyright 2018 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "xorm.io/core"
+)
+
+// tbNameWithSchema prefixes v with the configured schema name. The prefix is
+// only applied for PostgreSQL, when a non-default schema is configured and the
+// name is not already qualified with a dot.
+func (engine *Engine) tbNameWithSchema(v string) string {
+	if engine.dialect.DBType() != core.POSTGRES {
+		return v
+	}
+	schema := engine.dialect.URI().Schema
+	if schema == "" || schema == postgresPublicSchema || strings.Contains(v, ".") {
+		return v
+	}
+	return schema + "." + v
+}
+
+// TableName returns the table name for bean; pass includeSchema=true to also
+// apply the schema prefix.
+func (engine *Engine) TableName(bean interface{}, includeSchema ...bool) string {
+	name := engine.tbNameNoSchema(bean)
+	if len(includeSchema) == 0 || !includeSchema[0] {
+		return name
+	}
+	return engine.tbNameWithSchema(name)
+}
+
+// tbNameNoSchema returns the table name for this session's statement: an
+// explicitly set alternate table name wins over the mapped table's own name.
+func (session *Session) tbNameNoSchema(table *core.Table) string {
+	if len(session.statement.AltTableName) > 0 {
+		return session.statement.AltTableName
+	}
+
+	return table.Name
+}
+
+// tbNameForMap derives the table name for a struct value: the TableName
+// interface wins (checked on the value, then on its pointee), otherwise the
+// table mapper translates the struct type name.
+func (engine *Engine) tbNameForMap(v reflect.Value) string {
+	if v.Type().Implements(tpTableName) {
+		return v.Interface().(TableName).TableName()
+	}
+	if v.Kind() == reflect.Ptr {
+		// Dereference so the fallback below maps the element type's name,
+		// not the (empty) name of the pointer type.
+		v = v.Elem()
+		if v.Type().Implements(tpTableName) {
+			return v.Interface().(TableName).TableName()
+		}
+	}
+
+	return engine.TableMapper.Obj2Table(v.Type().Name())
+}
+
+// tbNameNoSchema resolves a table name (without schema prefix) from the many
+// forms callers may pass: a plain string, a TableName implementor, a
+// reflect.Value or struct (mapped via tbNameForMap), or a one/two element
+// slice meaning "table" or "table AS alias".
+func (engine *Engine) tbNameNoSchema(tablename interface{}) string {
+	switch name := tablename.(type) {
+	case []string:
+		if len(name) > 1 {
+			return fmt.Sprintf("%v AS %v", engine.Quote(name[0]), engine.Quote(name[1]))
+		} else if len(name) == 1 {
+			return engine.Quote(name[0])
+		}
+	case []interface{}:
+		var table string
+		if len(name) > 0 {
+			switch first := name[0].(type) {
+			case string:
+				table = first
+			case TableName:
+				table = first.TableName()
+			default:
+				v := rValue(first)
+				if v.Type().Kind() == reflect.Struct {
+					table = engine.tbNameForMap(v)
+				} else {
+					table = engine.Quote(fmt.Sprintf("%v", first))
+				}
+			}
+		}
+		if len(name) > 1 {
+			return fmt.Sprintf("%v AS %v", engine.Quote(table),
+				engine.Quote(fmt.Sprintf("%v", name[1])))
+		} else if len(name) == 1 {
+			return engine.Quote(table)
+		}
+	case TableName:
+		return name.TableName()
+	case string:
+		return name
+	case reflect.Value:
+		return engine.tbNameForMap(name)
+	default:
+		v := rValue(tablename)
+		if v.Type().Kind() == reflect.Struct {
+			return engine.tbNameForMap(v)
+		}
+		return engine.Quote(fmt.Sprintf("%v", tablename))
+	}
+	// Empty slices fall through to an empty name, matching the original behavior.
+	return ""
+}
diff --git a/vendor/github.com/go-xorm/xorm/error.go b/vendor/github.com/go-xorm/xorm/error.go
new file mode 100644
index 0000000..a67527a
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/error.go
@@ -0,0 +1,51 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "errors"
+ "fmt"
+)
+
+var (
+	// ErrParamsType reports an argument of an unexpected type.
+	ErrParamsType = errors.New("Params type error")
+	// ErrTableNotFound reports a table that could not be found.
+	ErrTableNotFound = errors.New("Table not found")
+	// ErrUnSupportedType reports an unsupported type.
+	ErrUnSupportedType = errors.New("Unsupported type error")
+	// ErrNotExist reports a record that does not exist.
+	ErrNotExist = errors.New("Record does not exist")
+	// ErrCacheFailed reports a cache operation failure.
+	ErrCacheFailed = errors.New("Cache failed")
+	// ErrNeedDeletedCond reports a delete issued without any condition.
+	ErrNeedDeletedCond = errors.New("Delete action needs at least one condition")
+	// ErrNotImplemented reports functionality that is not implemented.
+	ErrNotImplemented = errors.New("Not implemented")
+	// ErrConditionType reports an unsupported condition type.
+	ErrConditionType = errors.New("Unsupported condition type")
+	// ErrUnSupportedSQLType reports an unsupported SQL parameter type.
+	ErrUnSupportedSQLType = errors.New("unsupported sql type")
+)
+
+// ErrFieldIsNotExist reports that the named field/column does not exist on the table.
+type ErrFieldIsNotExist struct {
+	FieldName string
+	TableName string
+}
+
+// Error implements the error interface.
+func (e ErrFieldIsNotExist) Error() string {
+	// Previously this duplicated ErrFieldIsNotValid's "is not valid" message,
+	// making the two error types indistinguishable in logs.
+	return fmt.Sprintf("field %s does not exist on table %s", e.FieldName, e.TableName)
+}
+
+// ErrFieldIsNotValid reports that the named field is not valid on the table.
+type ErrFieldIsNotValid struct {
+	FieldName string
+	TableName string
+}
+
+// Error implements the error interface.
+func (e ErrFieldIsNotValid) Error() string {
+	return fmt.Sprintf("field %s is not valid on table %s", e.FieldName, e.TableName)
+}
diff --git a/vendor/github.com/go-xorm/xorm/gen_reserved.sh b/vendor/github.com/go-xorm/xorm/gen_reserved.sh
new file mode 100644
index 0000000..434a1bf
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/gen_reserved.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+if [ -f $1 ];then
+ cat $1| awk '{printf("\""$1"\":true,\n")}'
+else
+ echo "argument $1 if not a file!"
+fi
diff --git a/vendor/github.com/go-xorm/xorm/go.mod b/vendor/github.com/go-xorm/xorm/go.mod
new file mode 100644
index 0000000..9a30e79
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/go.mod
@@ -0,0 +1,19 @@
+module github.com/go-xorm/xorm
+
+require (
+ github.com/cockroachdb/apd v1.1.0 // indirect
+ github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4
+ github.com/go-sql-driver/mysql v1.4.1
+ github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect
+ github.com/jackc/pgx v3.3.0+incompatible
+ github.com/kr/pretty v0.1.0 // indirect
+ github.com/lib/pq v1.0.0
+ github.com/mattn/go-sqlite3 v1.10.0
+ github.com/pkg/errors v0.8.1 // indirect
+ github.com/satori/go.uuid v1.2.0 // indirect
+ github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect
+ github.com/stretchr/testify v1.3.0
+ github.com/ziutek/mymysql v1.5.4
+ xorm.io/builder v0.3.5
+ xorm.io/core v0.7.0
+)
diff --git a/vendor/github.com/go-xorm/xorm/go.sum b/vendor/github.com/go-xorm/xorm/go.sum
new file mode 100644
index 0000000..370dcd0
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/go.sum
@@ -0,0 +1,168 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU=
+cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
+github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
+github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4 h1:YcpmyvADGYw5LqMnHqSkyIELsHCGF6PkrmM31V8rF7o=
+github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
+github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:9wScpmSP5A3Bk8V3XHWUcJmYTh+ZnlHVyc+A4oZYS3Y=
+github.com/go-xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:56xuuqnHyryaerycW3BfssRdxQstACi0Epw/yC5E2xM=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=
+github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
+github.com/jackc/pgx v3.3.0+incompatible h1:Wa90/+qsITBAPkAZjiByeIGHFcj3Ztu+VzrrIpHjL90=
+github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
+github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
+github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
+github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
+go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190606050223-4d9ae51c2468/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.0 h1:Tfd7cKwKbFRsI8RMAD3oqqw7JPFRrvFlOsfbgVkjOOw=
+google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+xorm.io/builder v0.3.5 h1:EilU39fvWDxjb1cDaELpYhsF+zziRBhew8xk4pngO+A=
+xorm.io/builder v0.3.5/go.mod h1:ZFbByS/KxZI1FKRjL05PyJ4YrK2bcxlUaAxdum5aTR8=
+xorm.io/core v0.7.0 h1:hKxuOKWZNeiFQsSuGet/KV8HZ788hclvAl+7azx3tkM=
+xorm.io/core v0.7.0/go.mod h1:TuOJjIVa7e3w/rN8tDcAvuLBMtwzdHPbyOzE6Gk1EUI=
diff --git a/vendor/github.com/go-xorm/xorm/helpers.go b/vendor/github.com/go-xorm/xorm/helpers.go
new file mode 100644
index 0000000..a31e922
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/helpers.go
@@ -0,0 +1,332 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "xorm.io/core"
+)
+
+// str2PK convert string value to primary key value according to tp
+func str2PKValue(s string, tp reflect.Type) (reflect.Value, error) {
+ var err error
+ var result interface{}
+ var defReturn = reflect.Zero(tp)
+
+ switch tp.Kind() {
+ case reflect.Int:
+ result, err = strconv.Atoi(s)
+ if err != nil {
+ return defReturn, fmt.Errorf("convert %s as int: %s", s, err.Error())
+ }
+ case reflect.Int8:
+ x, err := strconv.Atoi(s)
+ if err != nil {
+ return defReturn, fmt.Errorf("convert %s as int8: %s", s, err.Error())
+ }
+ result = int8(x)
+ case reflect.Int16:
+ x, err := strconv.Atoi(s)
+ if err != nil {
+ return defReturn, fmt.Errorf("convert %s as int16: %s", s, err.Error())
+ }
+ result = int16(x)
+ case reflect.Int32:
+ x, err := strconv.Atoi(s)
+ if err != nil {
+ return defReturn, fmt.Errorf("convert %s as int32: %s", s, err.Error())
+ }
+ result = int32(x)
+ case reflect.Int64:
+ result, err = strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return defReturn, fmt.Errorf("convert %s as int64: %s", s, err.Error())
+ }
+ case reflect.Uint:
+ x, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return defReturn, fmt.Errorf("convert %s as uint: %s", s, err.Error())
+ }
+ result = uint(x)
+ case reflect.Uint8:
+ x, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return defReturn, fmt.Errorf("convert %s as uint8: %s", s, err.Error())
+ }
+ result = uint8(x)
+ case reflect.Uint16:
+ x, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return defReturn, fmt.Errorf("convert %s as uint16: %s", s, err.Error())
+ }
+ result = uint16(x)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return defReturn, fmt.Errorf("convert %s as uint32: %s", s, err.Error())
+ }
+ result = uint32(x)
+ case reflect.Uint64:
+ result, err = strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return defReturn, fmt.Errorf("convert %s as uint64: %s", s, err.Error())
+ }
+ case reflect.String:
+ result = s
+ default:
+ return defReturn, errors.New("unsupported convert type")
+ }
+ return reflect.ValueOf(result).Convert(tp), nil
+}
+
+func str2PK(s string, tp reflect.Type) (interface{}, error) {
+ v, err := str2PKValue(s, tp)
+ if err != nil {
+ return nil, err
+ }
+ return v.Interface(), nil
+}
+
+func splitTag(tag string) (tags []string) {
+ tag = strings.TrimSpace(tag)
+ var hasQuote = false
+ var lastIdx = 0
+ for i, t := range tag {
+ if t == '\'' {
+ hasQuote = !hasQuote
+ } else if t == ' ' {
+ if lastIdx < i && !hasQuote {
+ tags = append(tags, strings.TrimSpace(tag[lastIdx:i]))
+ lastIdx = i + 1
+ }
+ }
+ }
+ if lastIdx < len(tag) {
+ tags = append(tags, strings.TrimSpace(tag[lastIdx:]))
+ }
+ return
+}
+
+type zeroable interface {
+ IsZero() bool
+}
+
+func isZero(k interface{}) bool {
+ switch k.(type) {
+ case int:
+ return k.(int) == 0
+ case int8:
+ return k.(int8) == 0
+ case int16:
+ return k.(int16) == 0
+ case int32:
+ return k.(int32) == 0
+ case int64:
+ return k.(int64) == 0
+ case uint:
+ return k.(uint) == 0
+ case uint8:
+ return k.(uint8) == 0
+ case uint16:
+ return k.(uint16) == 0
+ case uint32:
+ return k.(uint32) == 0
+ case uint64:
+ return k.(uint64) == 0
+ case float32:
+ return k.(float32) == 0
+ case float64:
+ return k.(float64) == 0
+ case bool:
+ return k.(bool) == false
+ case string:
+ return k.(string) == ""
+ case zeroable:
+ return k.(zeroable).IsZero()
+ }
+ return false
+}
+
+func isStructZero(v reflect.Value) bool {
+ if !v.IsValid() {
+ return true
+ }
+
+ for i := 0; i < v.NumField(); i++ {
+ field := v.Field(i)
+ switch field.Kind() {
+ case reflect.Ptr:
+ field = field.Elem()
+ fallthrough
+ case reflect.Struct:
+ if !isStructZero(field) {
+ return false
+ }
+ default:
+ if field.CanInterface() && !isZero(field.Interface()) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func isArrayValueZero(v reflect.Value) bool {
+ if !v.IsValid() || v.Len() == 0 {
+ return true
+ }
+
+ for i := 0; i < v.Len(); i++ {
+ if !isZero(v.Index(i).Interface()) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func int64ToIntValue(id int64, tp reflect.Type) reflect.Value {
+ var v interface{}
+ kind := tp.Kind()
+
+ if kind == reflect.Ptr {
+ kind = tp.Elem().Kind()
+ }
+
+ switch kind {
+ case reflect.Int16:
+ temp := int16(id)
+ v = &temp
+ case reflect.Int32:
+ temp := int32(id)
+ v = &temp
+ case reflect.Int:
+ temp := int(id)
+ v = &temp
+ case reflect.Int64:
+ temp := id
+ v = &temp
+ case reflect.Uint16:
+ temp := uint16(id)
+ v = &temp
+ case reflect.Uint32:
+ temp := uint32(id)
+ v = &temp
+ case reflect.Uint64:
+ temp := uint64(id)
+ v = &temp
+ case reflect.Uint:
+ temp := uint(id)
+ v = &temp
+ }
+
+ if tp.Kind() == reflect.Ptr {
+ return reflect.ValueOf(v).Convert(tp)
+ }
+ return reflect.ValueOf(v).Elem().Convert(tp)
+}
+
+func int64ToInt(id int64, tp reflect.Type) interface{} {
+ return int64ToIntValue(id, tp).Interface()
+}
+
+func isPKZero(pk core.PK) bool {
+ for _, k := range pk {
+ if isZero(k) {
+ return true
+ }
+ }
+ return false
+}
+
+func indexNoCase(s, sep string) int {
+ return strings.Index(strings.ToLower(s), strings.ToLower(sep))
+}
+
+func splitNoCase(s, sep string) []string {
+ idx := indexNoCase(s, sep)
+ if idx < 0 {
+ return []string{s}
+ }
+ return strings.Split(s, s[idx:idx+len(sep)])
+}
+
+func splitNNoCase(s, sep string, n int) []string {
+ idx := indexNoCase(s, sep)
+ if idx < 0 {
+ return []string{s}
+ }
+ return strings.SplitN(s, s[idx:idx+len(sep)], n)
+}
+
+func makeArray(elem string, count int) []string {
+ res := make([]string, count)
+ for i := 0; i < count; i++ {
+ res[i] = elem
+ }
+ return res
+}
+
+func rValue(bean interface{}) reflect.Value {
+ return reflect.Indirect(reflect.ValueOf(bean))
+}
+
+func rType(bean interface{}) reflect.Type {
+ sliceValue := reflect.Indirect(reflect.ValueOf(bean))
+ // return reflect.TypeOf(sliceValue.Interface())
+ return sliceValue.Type()
+}
+
+func structName(v reflect.Type) string {
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ return v.Name()
+}
+
+func sliceEq(left, right []string) bool {
+ if len(left) != len(right) {
+ return false
+ }
+ sort.Sort(sort.StringSlice(left))
+ sort.Sort(sort.StringSlice(right))
+ for i := 0; i < len(left); i++ {
+ if left[i] != right[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func indexName(tableName, idxName string) string {
+ return fmt.Sprintf("IDX_%v_%v", tableName, idxName)
+}
+
+func eraseAny(value string, strToErase ...string) string {
+ if len(strToErase) == 0 {
+ return value
+ }
+ var replaceSeq []string
+ for _, s := range strToErase {
+ replaceSeq = append(replaceSeq, s, "")
+ }
+
+ replacer := strings.NewReplacer(replaceSeq...)
+
+ return replacer.Replace(value)
+}
+
+func quoteColumns(cols []string, quoteFunc func(string) string, sep string) string {
+ for i := range cols {
+ cols[i] = quoteFunc(cols[i])
+ }
+ return strings.Join(cols, sep+" ")
+}
diff --git a/vendor/github.com/go-xorm/xorm/helpler_time.go b/vendor/github.com/go-xorm/xorm/helpler_time.go
new file mode 100644
index 0000000..f4013e2
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/helpler_time.go
@@ -0,0 +1,21 @@
+// Copyright 2017 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import "time"
+
+const (
+ zeroTime0 = "0000-00-00 00:00:00"
+ zeroTime1 = "0001-01-01 00:00:00"
+)
+
+func formatTime(t time.Time) string {
+ return t.Format("2006-01-02 15:04:05")
+}
+
+func isTimeZero(t time.Time) bool {
+ return t.IsZero() || formatTime(t) == zeroTime0 ||
+ formatTime(t) == zeroTime1
+}
diff --git a/vendor/github.com/go-xorm/xorm/interface.go b/vendor/github.com/go-xorm/xorm/interface.go
new file mode 100644
index 0000000..0928f66
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/interface.go
@@ -0,0 +1,118 @@
+// Copyright 2017 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "context"
+ "database/sql"
+ "reflect"
+ "time"
+
+ "xorm.io/core"
+)
+
+// Interface defines the interface which Engine, EngineGroup and Session will implementate.
+type Interface interface {
+ AllCols() *Session
+ Alias(alias string) *Session
+ Asc(colNames ...string) *Session
+ BufferSize(size int) *Session
+ Cols(columns ...string) *Session
+ Count(...interface{}) (int64, error)
+ CreateIndexes(bean interface{}) error
+ CreateUniques(bean interface{}) error
+ Decr(column string, arg ...interface{}) *Session
+ Desc(...string) *Session
+ Delete(interface{}) (int64, error)
+ Distinct(columns ...string) *Session
+ DropIndexes(bean interface{}) error
+ Exec(sqlOrArgs ...interface{}) (sql.Result, error)
+ Exist(bean ...interface{}) (bool, error)
+ Find(interface{}, ...interface{}) error
+ FindAndCount(interface{}, ...interface{}) (int64, error)
+ Get(interface{}) (bool, error)
+ GroupBy(keys string) *Session
+ ID(interface{}) *Session
+ In(string, ...interface{}) *Session
+ Incr(column string, arg ...interface{}) *Session
+ Insert(...interface{}) (int64, error)
+ InsertOne(interface{}) (int64, error)
+ IsTableEmpty(bean interface{}) (bool, error)
+ IsTableExist(beanOrTableName interface{}) (bool, error)
+ Iterate(interface{}, IterFunc) error
+ Limit(int, ...int) *Session
+ MustCols(columns ...string) *Session
+ NoAutoCondition(...bool) *Session
+ NotIn(string, ...interface{}) *Session
+ Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session
+ Omit(columns ...string) *Session
+ OrderBy(order string) *Session
+ Ping() error
+ Query(sqlOrArgs ...interface{}) (resultsSlice []map[string][]byte, err error)
+ QueryInterface(sqlOrArgs ...interface{}) ([]map[string]interface{}, error)
+ QueryString(sqlOrArgs ...interface{}) ([]map[string]string, error)
+ Rows(bean interface{}) (*Rows, error)
+ SetExpr(string, string) *Session
+ SQL(interface{}, ...interface{}) *Session
+ Sum(bean interface{}, colName string) (float64, error)
+ SumInt(bean interface{}, colName string) (int64, error)
+ Sums(bean interface{}, colNames ...string) ([]float64, error)
+ SumsInt(bean interface{}, colNames ...string) ([]int64, error)
+ Table(tableNameOrBean interface{}) *Session
+ Unscoped() *Session
+ Update(bean interface{}, condiBeans ...interface{}) (int64, error)
+ UseBool(...string) *Session
+ Where(interface{}, ...interface{}) *Session
+}
+
+// EngineInterface defines the interface which Engine, EngineGroup will implementate.
+type EngineInterface interface {
+ Interface
+
+ Before(func(interface{})) *Session
+ Charset(charset string) *Session
+ ClearCache(...interface{}) error
+ Context(context.Context) *Session
+ CreateTables(...interface{}) error
+ DBMetas() ([]*core.Table, error)
+ Dialect() core.Dialect
+ DropTables(...interface{}) error
+ DumpAllToFile(fp string, tp ...core.DbType) error
+ GetCacher(string) core.Cacher
+ GetColumnMapper() core.IMapper
+ GetDefaultCacher() core.Cacher
+ GetTableMapper() core.IMapper
+ GetTZDatabase() *time.Location
+ GetTZLocation() *time.Location
+ MapCacher(interface{}, core.Cacher) error
+ NewSession() *Session
+ NoAutoTime() *Session
+ Quote(string) string
+ SetCacher(string, core.Cacher)
+ SetConnMaxLifetime(time.Duration)
+ SetDefaultCacher(core.Cacher)
+ SetLogger(logger core.ILogger)
+ SetLogLevel(core.LogLevel)
+ SetMapper(core.IMapper)
+ SetMaxOpenConns(int)
+ SetMaxIdleConns(int)
+ SetSchema(string)
+ SetTZDatabase(tz *time.Location)
+ SetTZLocation(tz *time.Location)
+ ShowExecTime(...bool)
+ ShowSQL(show ...bool)
+ Sync(...interface{}) error
+ Sync2(...interface{}) error
+ StoreEngine(storeEngine string) *Session
+ TableInfo(bean interface{}) *Table
+ TableName(interface{}, ...bool) string
+ UnMapType(reflect.Type)
+}
+
+var (
+ _ Interface = &Session{}
+ _ EngineInterface = &Engine{}
+ _ EngineInterface = &EngineGroup{}
+)
diff --git a/vendor/github.com/go-xorm/xorm/json.go b/vendor/github.com/go-xorm/xorm/json.go
new file mode 100644
index 0000000..fdb6ce5
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/json.go
@@ -0,0 +1,31 @@
+// Copyright 2019 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import "encoding/json"
+
+// JSONInterface represents an interface to handle json data
+type JSONInterface interface {
+ Marshal(v interface{}) ([]byte, error)
+ Unmarshal(data []byte, v interface{}) error
+}
+
+var (
+ // DefaultJSONHandler default json handler
+ DefaultJSONHandler JSONInterface = StdJSON{}
+)
+
+// StdJSON implements JSONInterface via encoding/json
+type StdJSON struct{}
+
+// Marshal implements JSONInterface
+func (StdJSON) Marshal(v interface{}) ([]byte, error) {
+ return json.Marshal(v)
+}
+
+// Unmarshal implements JSONInterface
+func (StdJSON) Unmarshal(data []byte, v interface{}) error {
+ return json.Unmarshal(data, v)
+}
diff --git a/vendor/github.com/go-xorm/xorm/logger.go b/vendor/github.com/go-xorm/xorm/logger.go
new file mode 100644
index 0000000..7b26e77
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/logger.go
@@ -0,0 +1,187 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "fmt"
+ "io"
+ "log"
+
+ "xorm.io/core"
+)
+
+// default log options
+const (
+ DEFAULT_LOG_PREFIX = "[xorm]"
+ DEFAULT_LOG_FLAG = log.Ldate | log.Lmicroseconds
+ DEFAULT_LOG_LEVEL = core.LOG_DEBUG
+)
+
+var _ core.ILogger = DiscardLogger{}
+
+// DiscardLogger don't log implementation for core.ILogger
+type DiscardLogger struct{}
+
+// Debug empty implementation
+func (DiscardLogger) Debug(v ...interface{}) {}
+
+// Debugf empty implementation
+func (DiscardLogger) Debugf(format string, v ...interface{}) {}
+
+// Error empty implementation
+func (DiscardLogger) Error(v ...interface{}) {}
+
+// Errorf empty implementation
+func (DiscardLogger) Errorf(format string, v ...interface{}) {}
+
+// Info empty implementation
+func (DiscardLogger) Info(v ...interface{}) {}
+
+// Infof empty implementation
+func (DiscardLogger) Infof(format string, v ...interface{}) {}
+
+// Warn empty implementation
+func (DiscardLogger) Warn(v ...interface{}) {}
+
+// Warnf empty implementation
+func (DiscardLogger) Warnf(format string, v ...interface{}) {}
+
+// Level empty implementation
+func (DiscardLogger) Level() core.LogLevel {
+ return core.LOG_UNKNOWN
+}
+
+// SetLevel empty implementation
+func (DiscardLogger) SetLevel(l core.LogLevel) {}
+
+// ShowSQL empty implementation
+func (DiscardLogger) ShowSQL(show ...bool) {}
+
+// IsShowSQL empty implementation
+func (DiscardLogger) IsShowSQL() bool {
+ return false
+}
+
+// SimpleLogger is the default implment of core.ILogger
+type SimpleLogger struct {
+ DEBUG *log.Logger
+ ERR *log.Logger
+ INFO *log.Logger
+ WARN *log.Logger
+ level core.LogLevel
+ showSQL bool
+}
+
+var _ core.ILogger = &SimpleLogger{}
+
+// NewSimpleLogger use a special io.Writer as logger output
+func NewSimpleLogger(out io.Writer) *SimpleLogger {
+ return NewSimpleLogger2(out, DEFAULT_LOG_PREFIX, DEFAULT_LOG_FLAG)
+}
+
+// NewSimpleLogger2 let you customrize your logger prefix and flag
+func NewSimpleLogger2(out io.Writer, prefix string, flag int) *SimpleLogger {
+ return NewSimpleLogger3(out, prefix, flag, DEFAULT_LOG_LEVEL)
+}
+
+// NewSimpleLogger3 let you customrize your logger prefix and flag and logLevel
+func NewSimpleLogger3(out io.Writer, prefix string, flag int, l core.LogLevel) *SimpleLogger {
+ return &SimpleLogger{
+ DEBUG: log.New(out, fmt.Sprintf("%s [debug] ", prefix), flag),
+ ERR: log.New(out, fmt.Sprintf("%s [error] ", prefix), flag),
+ INFO: log.New(out, fmt.Sprintf("%s [info] ", prefix), flag),
+ WARN: log.New(out, fmt.Sprintf("%s [warn] ", prefix), flag),
+ level: l,
+ }
+}
+
+// Error implement core.ILogger
+func (s *SimpleLogger) Error(v ...interface{}) {
+ if s.level <= core.LOG_ERR {
+ s.ERR.Output(2, fmt.Sprint(v...))
+ }
+ return
+}
+
+// Errorf implement core.ILogger
+func (s *SimpleLogger) Errorf(format string, v ...interface{}) {
+ if s.level <= core.LOG_ERR {
+ s.ERR.Output(2, fmt.Sprintf(format, v...))
+ }
+ return
+}
+
+// Debug implement core.ILogger
+func (s *SimpleLogger) Debug(v ...interface{}) {
+ if s.level <= core.LOG_DEBUG {
+ s.DEBUG.Output(2, fmt.Sprint(v...))
+ }
+ return
+}
+
+// Debugf implement core.ILogger
+func (s *SimpleLogger) Debugf(format string, v ...interface{}) {
+ if s.level <= core.LOG_DEBUG {
+ s.DEBUG.Output(2, fmt.Sprintf(format, v...))
+ }
+ return
+}
+
+// Info implement core.ILogger
+func (s *SimpleLogger) Info(v ...interface{}) {
+ if s.level <= core.LOG_INFO {
+ s.INFO.Output(2, fmt.Sprint(v...))
+ }
+ return
+}
+
+// Infof implement core.ILogger
+func (s *SimpleLogger) Infof(format string, v ...interface{}) {
+ if s.level <= core.LOG_INFO {
+ s.INFO.Output(2, fmt.Sprintf(format, v...))
+ }
+ return
+}
+
+// Warn implement core.ILogger
+func (s *SimpleLogger) Warn(v ...interface{}) {
+ if s.level <= core.LOG_WARNING {
+ s.WARN.Output(2, fmt.Sprint(v...))
+ }
+ return
+}
+
+// Warnf implement core.ILogger
+func (s *SimpleLogger) Warnf(format string, v ...interface{}) {
+ if s.level <= core.LOG_WARNING {
+ s.WARN.Output(2, fmt.Sprintf(format, v...))
+ }
+ return
+}
+
+// Level implement core.ILogger
+func (s *SimpleLogger) Level() core.LogLevel {
+ return s.level
+}
+
+// SetLevel implement core.ILogger
+func (s *SimpleLogger) SetLevel(l core.LogLevel) {
+ s.level = l
+ return
+}
+
+// ShowSQL implement core.ILogger
+func (s *SimpleLogger) ShowSQL(show ...bool) {
+ if len(show) == 0 {
+ s.showSQL = true
+ return
+ }
+ s.showSQL = show[0]
+}
+
+// IsShowSQL implement core.ILogger
+func (s *SimpleLogger) IsShowSQL() bool {
+ return s.showSQL
+}
diff --git a/vendor/github.com/go-xorm/xorm/pg_reserved.txt b/vendor/github.com/go-xorm/xorm/pg_reserved.txt
new file mode 100644
index 0000000..720ed37
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/pg_reserved.txt
@@ -0,0 +1,746 @@
+A non-reserved non-reserved
+ABORT non-reserved
+ABS reserved reserved
+ABSENT non-reserved non-reserved
+ABSOLUTE non-reserved non-reserved non-reserved reserved
+ACCESS non-reserved
+ACCORDING non-reserved non-reserved
+ACTION non-reserved non-reserved non-reserved reserved
+ADA non-reserved non-reserved non-reserved
+ADD non-reserved non-reserved non-reserved reserved
+ADMIN non-reserved non-reserved non-reserved
+AFTER non-reserved non-reserved non-reserved
+AGGREGATE non-reserved
+ALL reserved reserved reserved reserved
+ALLOCATE reserved reserved reserved
+ALSO non-reserved
+ALTER non-reserved reserved reserved reserved
+ALWAYS non-reserved non-reserved non-reserved
+ANALYSE reserved
+ANALYZE reserved
+AND reserved reserved reserved reserved
+ANY reserved reserved reserved reserved
+ARE reserved reserved reserved
+ARRAY reserved reserved reserved
+ARRAY_AGG reserved reserved
+ARRAY_MAX_CARDINALITY reserved
+AS reserved reserved reserved reserved
+ASC reserved non-reserved non-reserved reserved
+ASENSITIVE reserved reserved
+ASSERTION non-reserved non-reserved non-reserved reserved
+ASSIGNMENT non-reserved non-reserved non-reserved
+ASYMMETRIC reserved reserved reserved
+AT non-reserved reserved reserved reserved
+ATOMIC reserved reserved
+ATTRIBUTE non-reserved non-reserved non-reserved
+ATTRIBUTES non-reserved non-reserved
+AUTHORIZATION reserved (can be function or type) reserved reserved reserved
+AVG reserved reserved reserved
+BACKWARD non-reserved
+BASE64 non-reserved non-reserved
+BEFORE non-reserved non-reserved non-reserved
+BEGIN non-reserved reserved reserved reserved
+BEGIN_FRAME reserved
+BEGIN_PARTITION reserved
+BERNOULLI non-reserved non-reserved
+BETWEEN non-reserved (cannot be function or type) reserved reserved reserved
+BIGINT non-reserved (cannot be function or type) reserved reserved
+BINARY reserved (can be function or type) reserved reserved
+BIT non-reserved (cannot be function or type) reserved
+BIT_LENGTH reserved
+BLOB reserved reserved
+BLOCKED non-reserved non-reserved
+BOM non-reserved non-reserved
+BOOLEAN non-reserved (cannot be function or type) reserved reserved
+BOTH reserved reserved reserved reserved
+BREADTH non-reserved non-reserved
+BY non-reserved reserved reserved reserved
+C non-reserved non-reserved non-reserved
+CACHE non-reserved
+CALL reserved reserved
+CALLED non-reserved reserved reserved
+CARDINALITY reserved reserved
+CASCADE non-reserved non-reserved non-reserved reserved
+CASCADED non-reserved reserved reserved reserved
+CASE reserved reserved reserved reserved
+CAST reserved reserved reserved reserved
+CATALOG non-reserved non-reserved non-reserved reserved
+CATALOG_NAME non-reserved non-reserved non-reserved
+CEIL reserved reserved
+CEILING reserved reserved
+CHAIN non-reserved non-reserved non-reserved
+CHAR non-reserved (cannot be function or type) reserved reserved reserved
+CHARACTER non-reserved (cannot be function or type) reserved reserved reserved
+CHARACTERISTICS non-reserved non-reserved non-reserved
+CHARACTERS non-reserved non-reserved
+CHARACTER_LENGTH reserved reserved reserved
+CHARACTER_SET_CATALOG non-reserved non-reserved non-reserved
+CHARACTER_SET_NAME non-reserved non-reserved non-reserved
+CHARACTER_SET_SCHEMA non-reserved non-reserved non-reserved
+CHAR_LENGTH reserved reserved reserved
+CHECK reserved reserved reserved reserved
+CHECKPOINT non-reserved
+CLASS non-reserved
+CLASS_ORIGIN non-reserved non-reserved non-reserved
+CLOB reserved reserved
+CLOSE non-reserved reserved reserved reserved
+CLUSTER non-reserved
+COALESCE non-reserved (cannot be function or type) reserved reserved reserved
+COBOL non-reserved non-reserved non-reserved
+COLLATE reserved reserved reserved reserved
+COLLATION reserved (can be function or type) non-reserved non-reserved reserved
+COLLATION_CATALOG non-reserved non-reserved non-reserved
+COLLATION_NAME non-reserved non-reserved non-reserved
+COLLATION_SCHEMA non-reserved non-reserved non-reserved
+COLLECT reserved reserved
+COLUMN reserved reserved reserved reserved
+COLUMNS non-reserved non-reserved
+COLUMN_NAME non-reserved non-reserved non-reserved
+COMMAND_FUNCTION non-reserved non-reserved non-reserved
+COMMAND_FUNCTION_CODE non-reserved non-reserved
+COMMENT non-reserved
+COMMENTS non-reserved
+COMMIT non-reserved reserved reserved reserved
+COMMITTED non-reserved non-reserved non-reserved non-reserved
+CONCURRENTLY reserved (can be function or type)
+CONDITION reserved reserved
+CONDITION_NUMBER non-reserved non-reserved non-reserved
+CONFIGURATION non-reserved
+CONNECT reserved reserved reserved
+CONNECTION non-reserved non-reserved non-reserved reserved
+CONNECTION_NAME non-reserved non-reserved non-reserved
+CONSTRAINT reserved reserved reserved reserved
+CONSTRAINTS non-reserved non-reserved non-reserved reserved
+CONSTRAINT_CATALOG non-reserved non-reserved non-reserved
+CONSTRAINT_NAME non-reserved non-reserved non-reserved
+CONSTRAINT_SCHEMA non-reserved non-reserved non-reserved
+CONSTRUCTOR non-reserved non-reserved
+CONTAINS reserved non-reserved
+CONTENT non-reserved non-reserved non-reserved
+CONTINUE non-reserved non-reserved non-reserved reserved
+CONTROL non-reserved non-reserved
+CONVERSION non-reserved
+CONVERT reserved reserved reserved
+COPY non-reserved
+CORR reserved reserved
+CORRESPONDING reserved reserved reserved
+COST non-reserved
+COUNT reserved reserved reserved
+COVAR_POP reserved reserved
+COVAR_SAMP reserved reserved
+CREATE reserved reserved reserved reserved
+CROSS reserved (can be function or type) reserved reserved reserved
+CSV non-reserved
+CUBE reserved reserved
+CUME_DIST reserved reserved
+CURRENT non-reserved reserved reserved reserved
+CURRENT_CATALOG reserved reserved reserved
+CURRENT_DATE reserved reserved reserved reserved
+CURRENT_DEFAULT_TRANSFORM_GROUP reserved reserved
+CURRENT_PATH reserved reserved
+CURRENT_ROLE reserved reserved reserved
+CURRENT_ROW reserved
+CURRENT_SCHEMA reserved (can be function or type) reserved reserved
+CURRENT_TIME reserved reserved reserved reserved
+CURRENT_TIMESTAMP reserved reserved reserved reserved
+CURRENT_TRANSFORM_GROUP_FOR_TYPE reserved reserved
+CURRENT_USER reserved reserved reserved reserved
+CURSOR non-reserved reserved reserved reserved
+CURSOR_NAME non-reserved non-reserved non-reserved
+CYCLE non-reserved reserved reserved
+DATA non-reserved non-reserved non-reserved non-reserved
+DATABASE non-reserved
+DATALINK reserved reserved
+DATE reserved reserved reserved
+DATETIME_INTERVAL_CODE non-reserved non-reserved non-reserved
+DATETIME_INTERVAL_PRECISION non-reserved non-reserved non-reserved
+DAY non-reserved reserved reserved reserved
+DB non-reserved non-reserved
+DEALLOCATE non-reserved reserved reserved reserved
+DEC non-reserved (cannot be function or type) reserved reserved reserved
+DECIMAL non-reserved (cannot be function or type) reserved reserved reserved
+DECLARE non-reserved reserved reserved reserved
+DEFAULT reserved reserved reserved reserved
+DEFAULTS non-reserved non-reserved non-reserved
+DEFERRABLE reserved non-reserved non-reserved reserved
+DEFERRED non-reserved non-reserved non-reserved reserved
+DEFINED non-reserved non-reserved
+DEFINER non-reserved non-reserved non-reserved
+DEGREE non-reserved non-reserved
+DELETE non-reserved reserved reserved reserved
+DELIMITER non-reserved
+DELIMITERS non-reserved
+DENSE_RANK reserved reserved
+DEPTH non-reserved non-reserved
+DEREF reserved reserved
+DERIVED non-reserved non-reserved
+DESC reserved non-reserved non-reserved reserved
+DESCRIBE reserved reserved reserved
+DESCRIPTOR non-reserved non-reserved reserved
+DETERMINISTIC reserved reserved
+DIAGNOSTICS non-reserved non-reserved reserved
+DICTIONARY non-reserved
+DISABLE non-reserved
+DISCARD non-reserved
+DISCONNECT reserved reserved reserved
+DISPATCH non-reserved non-reserved
+DISTINCT reserved reserved reserved reserved
+DLNEWCOPY reserved reserved
+DLPREVIOUSCOPY reserved reserved
+DLURLCOMPLETE reserved reserved
+DLURLCOMPLETEONLY reserved reserved
+DLURLCOMPLETEWRITE reserved reserved
+DLURLPATH reserved reserved
+DLURLPATHONLY reserved reserved
+DLURLPATHWRITE reserved reserved
+DLURLSCHEME reserved reserved
+DLURLSERVER reserved reserved
+DLVALUE reserved reserved
+DO reserved
+DOCUMENT non-reserved non-reserved non-reserved
+DOMAIN non-reserved non-reserved non-reserved reserved
+DOUBLE non-reserved reserved reserved reserved
+DROP non-reserved reserved reserved reserved
+DYNAMIC reserved reserved
+DYNAMIC_FUNCTION non-reserved non-reserved non-reserved
+DYNAMIC_FUNCTION_CODE non-reserved non-reserved
+EACH non-reserved reserved reserved
+ELEMENT reserved reserved
+ELSE reserved reserved reserved reserved
+EMPTY non-reserved non-reserved
+ENABLE non-reserved
+ENCODING non-reserved non-reserved non-reserved
+ENCRYPTED non-reserved
+END reserved reserved reserved reserved
+END-EXEC reserved reserved reserved
+END_FRAME reserved
+END_PARTITION reserved
+ENFORCED non-reserved
+ENUM non-reserved
+EQUALS reserved non-reserved
+ESCAPE non-reserved reserved reserved reserved
+EVENT non-reserved
+EVERY reserved reserved
+EXCEPT reserved reserved reserved reserved
+EXCEPTION reserved
+EXCLUDE non-reserved non-reserved non-reserved
+EXCLUDING non-reserved non-reserved non-reserved
+EXCLUSIVE non-reserved
+EXEC reserved reserved reserved
+EXECUTE non-reserved reserved reserved reserved
+EXISTS non-reserved (cannot be function or type) reserved reserved reserved
+EXP reserved reserved
+EXPLAIN non-reserved
+EXPRESSION non-reserved
+EXTENSION non-reserved
+EXTERNAL non-reserved reserved reserved reserved
+EXTRACT non-reserved (cannot be function or type) reserved reserved reserved
+FALSE reserved reserved reserved reserved
+FAMILY non-reserved
+FETCH reserved reserved reserved reserved
+FILE non-reserved non-reserved
+FILTER reserved reserved
+FINAL non-reserved non-reserved
+FIRST non-reserved non-reserved non-reserved reserved
+FIRST_VALUE reserved reserved
+FLAG non-reserved non-reserved
+FLOAT non-reserved (cannot be function or type) reserved reserved reserved
+FLOOR reserved reserved
+FOLLOWING non-reserved non-reserved non-reserved
+FOR reserved reserved reserved reserved
+FORCE non-reserved
+FOREIGN reserved reserved reserved reserved
+FORTRAN non-reserved non-reserved non-reserved
+FORWARD non-reserved
+FOUND non-reserved non-reserved reserved
+FRAME_ROW reserved
+FREE reserved reserved
+FREEZE reserved (can be function or type)
+FROM reserved reserved reserved reserved
+FS non-reserved non-reserved
+FULL reserved (can be function or type) reserved reserved reserved
+FUNCTION non-reserved reserved reserved
+FUNCTIONS non-reserved
+FUSION reserved reserved
+G non-reserved non-reserved
+GENERAL non-reserved non-reserved
+GENERATED non-reserved non-reserved
+GET reserved reserved reserved
+GLOBAL non-reserved reserved reserved reserved
+GO non-reserved non-reserved reserved
+GOTO non-reserved non-reserved reserved
+GRANT reserved reserved reserved reserved
+GRANTED non-reserved non-reserved non-reserved
+GREATEST non-reserved (cannot be function or type)
+GROUP reserved reserved reserved reserved
+GROUPING reserved reserved
+GROUPS reserved
+HANDLER non-reserved
+HAVING reserved reserved reserved reserved
+HEADER non-reserved
+HEX non-reserved non-reserved
+HIERARCHY non-reserved non-reserved
+HOLD non-reserved reserved reserved
+HOUR non-reserved reserved reserved reserved
+ID non-reserved non-reserved
+IDENTITY non-reserved reserved reserved reserved
+IF non-reserved
+IGNORE non-reserved non-reserved
+ILIKE reserved (can be function or type)
+IMMEDIATE non-reserved non-reserved non-reserved reserved
+IMMEDIATELY non-reserved
+IMMUTABLE non-reserved
+IMPLEMENTATION non-reserved non-reserved
+IMPLICIT non-reserved
+IMPORT reserved reserved
+IN reserved reserved reserved reserved
+INCLUDING non-reserved non-reserved non-reserved
+INCREMENT non-reserved non-reserved non-reserved
+INDENT non-reserved non-reserved
+INDEX non-reserved
+INDEXES non-reserved
+INDICATOR reserved reserved reserved
+INHERIT non-reserved
+INHERITS non-reserved
+INITIALLY reserved non-reserved non-reserved reserved
+INLINE non-reserved
+INNER reserved (can be function or type) reserved reserved reserved
+INOUT non-reserved (cannot be function or type) reserved reserved
+INPUT non-reserved non-reserved non-reserved reserved
+INSENSITIVE non-reserved reserved reserved reserved
+INSERT non-reserved reserved reserved reserved
+INSTANCE non-reserved non-reserved
+INSTANTIABLE non-reserved non-reserved
+INSTEAD non-reserved non-reserved non-reserved
+INT non-reserved (cannot be function or type) reserved reserved reserved
+INTEGER non-reserved (cannot be function or type) reserved reserved reserved
+INTEGRITY non-reserved non-reserved
+INTERSECT reserved reserved reserved reserved
+INTERSECTION reserved reserved
+INTERVAL non-reserved (cannot be function or type) reserved reserved reserved
+INTO reserved reserved reserved reserved
+INVOKER non-reserved non-reserved non-reserved
+IS reserved (can be function or type) reserved reserved reserved
+ISNULL reserved (can be function or type)
+ISOLATION non-reserved non-reserved non-reserved reserved
+JOIN reserved (can be function or type) reserved reserved reserved
+K non-reserved non-reserved
+KEY non-reserved non-reserved non-reserved reserved
+KEY_MEMBER non-reserved non-reserved
+KEY_TYPE non-reserved non-reserved
+LABEL non-reserved
+LAG reserved reserved
+LANGUAGE non-reserved reserved reserved reserved
+LARGE non-reserved reserved reserved
+LAST non-reserved non-reserved non-reserved reserved
+LAST_VALUE reserved reserved
+LATERAL reserved reserved reserved
+LC_COLLATE non-reserved
+LC_CTYPE non-reserved
+LEAD reserved reserved
+LEADING reserved reserved reserved reserved
+LEAKPROOF non-reserved
+LEAST non-reserved (cannot be function or type)
+LEFT reserved (can be function or type) reserved reserved reserved
+LENGTH non-reserved non-reserved non-reserved
+LEVEL non-reserved non-reserved non-reserved reserved
+LIBRARY non-reserved non-reserved
+LIKE reserved (can be function or type) reserved reserved reserved
+LIKE_REGEX reserved reserved
+LIMIT reserved non-reserved non-reserved
+LINK non-reserved non-reserved
+LISTEN non-reserved
+LN reserved reserved
+LOAD non-reserved
+LOCAL non-reserved reserved reserved reserved
+LOCALTIME reserved reserved reserved
+LOCALTIMESTAMP reserved reserved reserved
+LOCATION non-reserved non-reserved non-reserved
+LOCATOR non-reserved non-reserved
+LOCK non-reserved
+LOWER reserved reserved reserved
+M non-reserved non-reserved
+MAP non-reserved non-reserved
+MAPPING non-reserved non-reserved non-reserved
+MATCH non-reserved reserved reserved reserved
+MATCHED non-reserved non-reserved
+MATERIALIZED non-reserved
+MAX reserved reserved reserved
+MAXVALUE non-reserved non-reserved non-reserved
+MAX_CARDINALITY reserved
+MEMBER reserved reserved
+MERGE reserved reserved
+MESSAGE_LENGTH non-reserved non-reserved non-reserved
+MESSAGE_OCTET_LENGTH non-reserved non-reserved non-reserved
+MESSAGE_TEXT non-reserved non-reserved non-reserved
+METHOD reserved reserved
+MIN reserved reserved reserved
+MINUTE non-reserved reserved reserved reserved
+MINVALUE non-reserved non-reserved non-reserved
+MOD reserved reserved
+MODE non-reserved
+MODIFIES reserved reserved
+MODULE reserved reserved reserved
+MONTH non-reserved reserved reserved reserved
+MORE non-reserved non-reserved non-reserved
+MOVE non-reserved
+MULTISET reserved reserved
+MUMPS non-reserved non-reserved non-reserved
+NAME non-reserved non-reserved non-reserved non-reserved
+NAMES non-reserved non-reserved non-reserved reserved
+NAMESPACE non-reserved non-reserved
+NATIONAL non-reserved (cannot be function or type) reserved reserved reserved
+NATURAL reserved (can be function or type) reserved reserved reserved
+NCHAR non-reserved (cannot be function or type) reserved reserved reserved
+NCLOB reserved reserved
+NESTING non-reserved non-reserved
+NEW reserved reserved
+NEXT non-reserved non-reserved non-reserved reserved
+NFC non-reserved non-reserved
+NFD non-reserved non-reserved
+NFKC non-reserved non-reserved
+NFKD non-reserved non-reserved
+NIL non-reserved non-reserved
+NO non-reserved reserved reserved reserved
+NONE non-reserved (cannot be function or type) reserved reserved
+NORMALIZE reserved reserved
+NORMALIZED non-reserved non-reserved
+NOT reserved reserved reserved reserved
+NOTHING non-reserved
+NOTIFY non-reserved
+NOTNULL reserved (can be function or type)
+NOWAIT non-reserved
+NTH_VALUE reserved reserved
+NTILE reserved reserved
+NULL reserved reserved reserved reserved
+NULLABLE non-reserved non-reserved non-reserved
+NULLIF non-reserved (cannot be function or type) reserved reserved reserved
+NULLS non-reserved non-reserved non-reserved
+NUMBER non-reserved non-reserved non-reserved
+NUMERIC non-reserved (cannot be function or type) reserved reserved reserved
+OBJECT non-reserved non-reserved non-reserved
+OCCURRENCES_REGEX reserved reserved
+OCTETS non-reserved non-reserved
+OCTET_LENGTH reserved reserved reserved
+OF non-reserved reserved reserved reserved
+OFF non-reserved non-reserved non-reserved
+OFFSET reserved reserved reserved
+OIDS non-reserved
+OLD reserved reserved
+ON reserved reserved reserved reserved
+ONLY reserved reserved reserved reserved
+OPEN reserved reserved reserved
+OPERATOR non-reserved
+OPTION non-reserved non-reserved non-reserved reserved
+OPTIONS non-reserved non-reserved non-reserved
+OR reserved reserved reserved reserved
+ORDER reserved reserved reserved reserved
+ORDERING non-reserved non-reserved
+ORDINALITY non-reserved non-reserved
+OTHERS non-reserved non-reserved
+OUT non-reserved (cannot be function or type) reserved reserved
+OUTER reserved (can be function or type) reserved reserved reserved
+OUTPUT non-reserved non-reserved reserved
+OVER reserved (can be function or type) reserved reserved
+OVERLAPS reserved (can be function or type) reserved reserved reserved
+OVERLAY non-reserved (cannot be function or type) reserved reserved
+OVERRIDING non-reserved non-reserved
+OWNED non-reserved
+OWNER non-reserved
+P non-reserved non-reserved
+PAD non-reserved non-reserved reserved
+PARAMETER reserved reserved
+PARAMETER_MODE non-reserved non-reserved
+PARAMETER_NAME non-reserved non-reserved
+PARAMETER_ORDINAL_POSITION non-reserved non-reserved
+PARAMETER_SPECIFIC_CATALOG non-reserved non-reserved
+PARAMETER_SPECIFIC_NAME non-reserved non-reserved
+PARAMETER_SPECIFIC_SCHEMA non-reserved non-reserved
+PARSER non-reserved
+PARTIAL non-reserved non-reserved non-reserved reserved
+PARTITION non-reserved reserved reserved
+PASCAL non-reserved non-reserved non-reserved
+PASSING non-reserved non-reserved non-reserved
+PASSTHROUGH non-reserved non-reserved
+PASSWORD non-reserved
+PATH non-reserved non-reserved
+PERCENT reserved
+PERCENTILE_CONT reserved reserved
+PERCENTILE_DISC reserved reserved
+PERCENT_RANK reserved reserved
+PERIOD reserved
+PERMISSION non-reserved non-reserved
+PLACING reserved non-reserved non-reserved
+PLANS non-reserved
+PLI non-reserved non-reserved non-reserved
+PORTION reserved
+POSITION non-reserved (cannot be function or type) reserved reserved reserved
+POSITION_REGEX reserved reserved
+POWER reserved reserved
+PRECEDES reserved
+PRECEDING non-reserved non-reserved non-reserved
+PRECISION non-reserved (cannot be function or type) reserved reserved reserved
+PREPARE non-reserved reserved reserved reserved
+PREPARED non-reserved
+PRESERVE non-reserved non-reserved non-reserved reserved
+PRIMARY reserved reserved reserved reserved
+PRIOR non-reserved non-reserved non-reserved reserved
+PRIVILEGES non-reserved non-reserved non-reserved reserved
+PROCEDURAL non-reserved
+PROCEDURE non-reserved reserved reserved reserved
+PROGRAM non-reserved
+PUBLIC non-reserved non-reserved reserved
+QUOTE non-reserved
+RANGE non-reserved reserved reserved
+RANK reserved reserved
+READ non-reserved non-reserved non-reserved reserved
+READS reserved reserved
+REAL non-reserved (cannot be function or type) reserved reserved reserved
+REASSIGN non-reserved
+RECHECK non-reserved
+RECOVERY non-reserved non-reserved
+RECURSIVE non-reserved reserved reserved
+REF non-reserved reserved reserved
+REFERENCES reserved reserved reserved reserved
+REFERENCING reserved reserved
+REFRESH non-reserved
+REGR_AVGX reserved reserved
+REGR_AVGY reserved reserved
+REGR_COUNT reserved reserved
+REGR_INTERCEPT reserved reserved
+REGR_R2 reserved reserved
+REGR_SLOPE reserved reserved
+REGR_SXX reserved reserved
+REGR_SXY reserved reserved
+REGR_SYY reserved reserved
+REINDEX non-reserved
+RELATIVE non-reserved non-reserved non-reserved reserved
+RELEASE non-reserved reserved reserved
+RENAME non-reserved
+REPEATABLE non-reserved non-reserved non-reserved non-reserved
+REPLACE non-reserved
+REPLICA non-reserved
+REQUIRING non-reserved non-reserved
+RESET non-reserved
+RESPECT non-reserved non-reserved
+RESTART non-reserved non-reserved non-reserved
+RESTORE non-reserved non-reserved
+RESTRICT non-reserved non-reserved non-reserved reserved
+RESULT reserved reserved
+RETURN reserved reserved
+RETURNED_CARDINALITY non-reserved non-reserved
+RETURNED_LENGTH non-reserved non-reserved non-reserved
+RETURNED_OCTET_LENGTH non-reserved non-reserved non-reserved
+RETURNED_SQLSTATE non-reserved non-reserved non-reserved
+RETURNING reserved non-reserved non-reserved
+RETURNS non-reserved reserved reserved
+REVOKE non-reserved reserved reserved reserved
+RIGHT reserved (can be function or type) reserved reserved reserved
+ROLE non-reserved non-reserved non-reserved
+ROLLBACK non-reserved reserved reserved reserved
+ROLLUP reserved reserved
+ROUTINE non-reserved non-reserved
+ROUTINE_CATALOG non-reserved non-reserved
+ROUTINE_NAME non-reserved non-reserved
+ROUTINE_SCHEMA non-reserved non-reserved
+ROW non-reserved (cannot be function or type) reserved reserved
+ROWS non-reserved reserved reserved reserved
+ROW_COUNT non-reserved non-reserved non-reserved
+ROW_NUMBER reserved reserved
+RULE non-reserved
+SAVEPOINT non-reserved reserved reserved
+SCALE non-reserved non-reserved non-reserved
+SCHEMA non-reserved non-reserved non-reserved reserved
+SCHEMA_NAME non-reserved non-reserved non-reserved
+SCOPE reserved reserved
+SCOPE_CATALOG non-reserved non-reserved
+SCOPE_NAME non-reserved non-reserved
+SCOPE_SCHEMA non-reserved non-reserved
+SCROLL non-reserved reserved reserved reserved
+SEARCH non-reserved reserved reserved
+SECOND non-reserved reserved reserved reserved
+SECTION non-reserved non-reserved reserved
+SECURITY non-reserved non-reserved non-reserved
+SELECT reserved reserved reserved reserved
+SELECTIVE non-reserved non-reserved
+SELF non-reserved non-reserved
+SENSITIVE reserved reserved
+SEQUENCE non-reserved non-reserved non-reserved
+SEQUENCES non-reserved
+SERIALIZABLE non-reserved non-reserved non-reserved non-reserved
+SERVER non-reserved non-reserved non-reserved
+SERVER_NAME non-reserved non-reserved non-reserved
+SESSION non-reserved non-reserved non-reserved reserved
+SESSION_USER reserved reserved reserved reserved
+SET non-reserved reserved reserved reserved
+SETOF non-reserved (cannot be function or type)
+SETS non-reserved non-reserved
+SHARE non-reserved
+SHOW non-reserved
+SIMILAR reserved (can be function or type) reserved reserved
+SIMPLE non-reserved non-reserved non-reserved
+SIZE non-reserved non-reserved reserved
+SMALLINT non-reserved (cannot be function or type) reserved reserved reserved
+SNAPSHOT non-reserved
+SOME reserved reserved reserved reserved
+SOURCE non-reserved non-reserved
+SPACE non-reserved non-reserved reserved
+SPECIFIC reserved reserved
+SPECIFICTYPE reserved reserved
+SPECIFIC_NAME non-reserved non-reserved
+SQL reserved reserved reserved
+SQLCODE reserved
+SQLERROR reserved
+SQLEXCEPTION reserved reserved
+SQLSTATE reserved reserved reserved
+SQLWARNING reserved reserved
+SQRT reserved reserved
+STABLE non-reserved
+STANDALONE non-reserved non-reserved non-reserved
+START non-reserved reserved reserved
+STATE non-reserved non-reserved
+STATEMENT non-reserved non-reserved non-reserved
+STATIC reserved reserved
+STATISTICS non-reserved
+STDDEV_POP reserved reserved
+STDDEV_SAMP reserved reserved
+STDIN non-reserved
+STDOUT non-reserved
+STORAGE non-reserved
+STRICT non-reserved
+STRIP non-reserved non-reserved non-reserved
+STRUCTURE non-reserved non-reserved
+STYLE non-reserved non-reserved
+SUBCLASS_ORIGIN non-reserved non-reserved non-reserved
+SUBMULTISET reserved reserved
+SUBSTRING non-reserved (cannot be function or type) reserved reserved reserved
+SUBSTRING_REGEX reserved reserved
+SUCCEEDS reserved
+SUM reserved reserved reserved
+SYMMETRIC reserved reserved reserved
+SYSID non-reserved
+SYSTEM non-reserved reserved reserved
+SYSTEM_TIME reserved
+SYSTEM_USER reserved reserved reserved
+T non-reserved non-reserved
+TABLE reserved reserved reserved reserved
+TABLES non-reserved
+TABLESAMPLE reserved reserved
+TABLESPACE non-reserved
+TABLE_NAME non-reserved non-reserved non-reserved
+TEMP non-reserved
+TEMPLATE non-reserved
+TEMPORARY non-reserved non-reserved non-reserved reserved
+TEXT non-reserved
+THEN reserved reserved reserved reserved
+TIES non-reserved non-reserved
+TIME non-reserved (cannot be function or type) reserved reserved reserved
+TIMESTAMP non-reserved (cannot be function or type) reserved reserved reserved
+TIMEZONE_HOUR reserved reserved reserved
+TIMEZONE_MINUTE reserved reserved reserved
+TO reserved reserved reserved reserved
+TOKEN non-reserved non-reserved
+TOP_LEVEL_COUNT non-reserved non-reserved
+TRAILING reserved reserved reserved reserved
+TRANSACTION non-reserved non-reserved non-reserved reserved
+TRANSACTIONS_COMMITTED non-reserved non-reserved
+TRANSACTIONS_ROLLED_BACK non-reserved non-reserved
+TRANSACTION_ACTIVE non-reserved non-reserved
+TRANSFORM non-reserved non-reserved
+TRANSFORMS non-reserved non-reserved
+TRANSLATE reserved reserved reserved
+TRANSLATE_REGEX reserved reserved
+TRANSLATION reserved reserved reserved
+TREAT non-reserved (cannot be function or type) reserved reserved
+TRIGGER non-reserved reserved reserved
+TRIGGER_CATALOG non-reserved non-reserved
+TRIGGER_NAME non-reserved non-reserved
+TRIGGER_SCHEMA non-reserved non-reserved
+TRIM non-reserved (cannot be function or type) reserved reserved reserved
+TRIM_ARRAY reserved reserved
+TRUE reserved reserved reserved reserved
+TRUNCATE non-reserved reserved reserved
+TRUSTED non-reserved
+TYPE non-reserved non-reserved non-reserved non-reserved
+TYPES non-reserved
+UESCAPE reserved reserved
+UNBOUNDED non-reserved non-reserved non-reserved
+UNCOMMITTED non-reserved non-reserved non-reserved non-reserved
+UNDER non-reserved non-reserved
+UNENCRYPTED non-reserved
+UNION reserved reserved reserved reserved
+UNIQUE reserved reserved reserved reserved
+UNKNOWN non-reserved reserved reserved reserved
+UNLINK non-reserved non-reserved
+UNLISTEN non-reserved
+UNLOGGED non-reserved
+UNNAMED non-reserved non-reserved non-reserved
+UNNEST reserved reserved
+UNTIL non-reserved
+UNTYPED non-reserved non-reserved
+UPDATE non-reserved reserved reserved reserved
+UPPER reserved reserved reserved
+URI non-reserved non-reserved
+USAGE non-reserved non-reserved reserved
+USER reserved reserved reserved reserved
+USER_DEFINED_TYPE_CATALOG non-reserved non-reserved
+USER_DEFINED_TYPE_CODE non-reserved non-reserved
+USER_DEFINED_TYPE_NAME non-reserved non-reserved
+USER_DEFINED_TYPE_SCHEMA non-reserved non-reserved
+USING reserved reserved reserved reserved
+VACUUM non-reserved
+VALID non-reserved non-reserved non-reserved
+VALIDATE non-reserved
+VALIDATOR non-reserved
+VALUE non-reserved reserved reserved reserved
+VALUES non-reserved (cannot be function or type) reserved reserved reserved
+VALUE_OF reserved
+VARBINARY reserved reserved
+VARCHAR non-reserved (cannot be function or type) reserved reserved reserved
+VARIADIC reserved
+VARYING non-reserved reserved reserved reserved
+VAR_POP reserved reserved
+VAR_SAMP reserved reserved
+VERBOSE reserved (can be function or type)
+VERSION non-reserved non-reserved non-reserved
+VERSIONING reserved
+VIEW non-reserved non-reserved non-reserved reserved
+VOLATILE non-reserved
+WHEN reserved reserved reserved reserved
+WHENEVER reserved reserved reserved
+WHERE reserved reserved reserved reserved
+WHITESPACE non-reserved non-reserved non-reserved
+WIDTH_BUCKET reserved reserved
+WINDOW reserved reserved reserved
+WITH reserved reserved reserved reserved
+WITHIN reserved reserved
+WITHOUT non-reserved reserved reserved
+WORK non-reserved non-reserved non-reserved reserved
+WRAPPER non-reserved non-reserved non-reserved
+WRITE non-reserved non-reserved non-reserved reserved
+XML non-reserved reserved reserved
+XMLAGG reserved reserved
+XMLATTRIBUTES non-reserved (cannot be function or type) reserved reserved
+XMLBINARY reserved reserved
+XMLCAST reserved reserved
+XMLCOMMENT reserved reserved
+XMLCONCAT non-reserved (cannot be function or type) reserved reserved
+XMLDECLARATION non-reserved non-reserved
+XMLDOCUMENT reserved reserved
+XMLELEMENT non-reserved (cannot be function or type) reserved reserved
+XMLEXISTS non-reserved (cannot be function or type) reserved reserved
+XMLFOREST non-reserved (cannot be function or type) reserved reserved
+XMLITERATE reserved reserved
+XMLNAMESPACES reserved reserved
+XMLPARSE non-reserved (cannot be function or type) reserved reserved
+XMLPI non-reserved (cannot be function or type) reserved reserved
+XMLQUERY reserved reserved
+XMLROOT non-reserved (cannot be function or type)
+XMLSCHEMA non-reserved non-reserved
+XMLSERIALIZE non-reserved (cannot be function or type) reserved reserved
+XMLTABLE reserved reserved
+XMLTEXT reserved reserved
+XMLVALIDATE reserved reserved
+YEAR non-reserved reserved reserved reserved
+YES non-reserved non-reserved non-reserved
+ZONE non-reserved non-reserved non-reserved reserved
\ No newline at end of file
diff --git a/vendor/github.com/go-xorm/xorm/processors.go b/vendor/github.com/go-xorm/xorm/processors.go
new file mode 100644
index 0000000..dcd9c6a
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/processors.go
@@ -0,0 +1,78 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+// BeforeInsertProcessor is executed before an object is initially persisted to the database.
+type BeforeInsertProcessor interface {
+	BeforeInsert()
+}
+
+// BeforeUpdateProcessor is executed before an object is updated.
+type BeforeUpdateProcessor interface {
+	BeforeUpdate()
+}
+
+// BeforeDeleteProcessor is executed before an object is deleted.
+type BeforeDeleteProcessor interface {
+	BeforeDelete()
+}
+
+// BeforeSetProcessor is executed before data is set to the struct fields.
+type BeforeSetProcessor interface {
+	BeforeSet(string, Cell)
+}
+
+// AfterSetProcessor is executed after data is set to the struct fields.
+type AfterSetProcessor interface {
+	AfterSet(string, Cell)
+}
+
+// AfterInsertProcessor is executed after an object is persisted to the database.
+type AfterInsertProcessor interface {
+	AfterInsert()
+}
+
+// AfterUpdateProcessor is executed after an object has been updated.
+type AfterUpdateProcessor interface {
+	AfterUpdate()
+}
+
+// AfterDeleteProcessor is executed after an object has been deleted.
+type AfterDeleteProcessor interface {
+	AfterDelete()
+}
+
+// AfterLoadProcessor is executed after an object has been loaded from the database.
+type AfterLoadProcessor interface {
+	AfterLoad()
+}
+
+// AfterLoadSessionProcessor is executed after an object has been loaded from the database, receiving the active session.
+type AfterLoadSessionProcessor interface {
+	AfterLoad(*Session)
+}
+
+type executedProcessorFunc func(*Session, interface{}) error // callback signature for queued after-processors
+
+type executedProcessor struct { // one queued after-processor invocation
+	fun     executedProcessorFunc
+	session *Session
+	bean    interface{}
+}
+
+func (executor *executedProcessor) execute() error { // runs the callback on its captured session and bean
+	return executor.fun(executor.session, executor.bean)
+}
+
+func (session *Session) executeProcessors() error { // drains session.afterProcessors, stopping at the first error
+	processors := session.afterProcessors // snapshot the queued processors
+	session.afterProcessors = make([]executedProcessor, 0) // clear the queue before executing the snapshot
+	for _, processor := range processors {
+		if err := processor.execute(); err != nil {
+			return err // first failing processor aborts the rest
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/go-xorm/xorm/rows.go b/vendor/github.com/go-xorm/xorm/rows.go
new file mode 100644
index 0000000..bdd4458
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/rows.go
@@ -0,0 +1,121 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "database/sql"
+ "fmt"
+ "reflect"
+
+ "xorm.io/core"
+)
+
+// Rows wraps a *core.Rows cursor for iterating query results one bean at a
+// time, remembering the bean type the cursor was opened with and the last
+// error encountered during iteration.
+type Rows struct {
+	session   *Session
+	rows      *core.Rows
+	beanType  reflect.Type
+	lastError error
+}
+
+// newRows builds a Rows cursor for the given bean: it binds the bean to the
+// session statement, generates the SQL (or reuses caller-supplied raw SQL),
+// and executes the query. On query failure the cursor is closed and the
+// error returned.
+func newRows(session *Session, bean interface{}) (*Rows, error) {
+	rows := new(Rows)
+	rows.session = session
+	rows.beanType = reflect.Indirect(reflect.ValueOf(bean)).Type()
+
+	var sqlStr string
+	var args []interface{}
+	var err error
+
+	if err = rows.session.statement.setRefBean(bean); err != nil {
+		return nil, err
+	}
+
+	// A resolvable table name is required before any SQL can be built.
+	if len(session.statement.TableName()) <= 0 {
+		return nil, ErrTableNotFound
+	}
+
+	if rows.session.statement.RawSQL == "" {
+		sqlStr, args, err = rows.session.statement.genGetSQL(bean)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		// Raw SQL supplied by the caller takes precedence over generation.
+		sqlStr = rows.session.statement.RawSQL
+		args = rows.session.statement.RawParams
+	}
+
+	rows.rows, err = rows.session.queryRows(sqlStr, args...)
+	if err != nil {
+		rows.lastError = err
+		rows.Close()
+		return nil, err
+	}
+
+	return rows, nil
+}
+
+// Next move cursor to next record, return false if end has reached.
+// NOTE(review): normal exhaustion sets lastError to sql.ErrNoRows, so Err()
+// reports ErrNoRows even after a clean full iteration — callers should treat
+// that value as "no more rows", not as a failure.
+func (rows *Rows) Next() bool {
+	if rows.lastError == nil && rows.rows != nil {
+		hasNext := rows.rows.Next()
+		if !hasNext {
+			rows.lastError = sql.ErrNoRows
+		}
+		return hasNext
+	}
+	return false
+}
+
+// Err returns the error, if any, that was encountered during iteration. Err may be called after an explicit or implicit Close.
+func (rows *Rows) Err() error {
+ return rows.lastError
+}
+
+// Scan row record to bean properties. The bean must have the same struct
+// type the cursor was created with; the row is first scanned into a raw
+// cell slice (row2Slice) and then mapped onto the struct fields
+// (slice2Bean), after which any queued after-processors are executed.
+func (rows *Rows) Scan(bean interface{}) error {
+	if rows.lastError != nil {
+		return rows.lastError
+	}
+
+	// Reject beans of a different type than the one used to open the cursor.
+	if reflect.Indirect(reflect.ValueOf(bean)).Type() != rows.beanType {
+		return fmt.Errorf("scan arg is incompatible type to [%v]", rows.beanType)
+	}
+
+	if err := rows.session.statement.setRefBean(bean); err != nil {
+		return err
+	}
+
+	fields, err := rows.rows.Columns()
+	if err != nil {
+		return err
+	}
+
+	scanResults, err := rows.session.row2Slice(rows.rows, fields, bean)
+	if err != nil {
+		return err
+	}
+
+	dataStruct := rValue(bean)
+	_, err = rows.session.slice2Bean(scanResults, fields, bean, &dataStruct, rows.session.statement.RefTable)
+	if err != nil {
+		return err
+	}
+
+	return rows.session.executeProcessors()
+}
+
+// Close closes the underlying cursor and releases any opened resources;
+// if session.isAutoClose is set the owning session is closed as well.
+func (rows *Rows) Close() error {
+	if rows.session.isAutoClose {
+		defer rows.session.Close()
+	}
+
+	if rows.rows != nil {
+		return rows.rows.Close()
+	}
+
+	return rows.lastError
+}
diff --git a/vendor/github.com/go-xorm/xorm/session.go b/vendor/github.com/go-xorm/xorm/session.go
new file mode 100644
index 0000000..b33955f
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/session.go
@@ -0,0 +1,866 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "context"
+ "database/sql"
+ "errors"
+ "fmt"
+ "hash/crc32"
+ "reflect"
+ "strings"
+ "time"
+
+ "xorm.io/core"
+)
+
+type sessionType int
+
+const (
+ engineSession sessionType = iota
+ groupSession
+)
+
+// Session keep a pointer to sql.DB and provides all execution of all
+// kind of database operations.
+type Session struct {
+ db *core.DB
+ engine *Engine
+ tx *core.Tx
+ statement Statement
+ isAutoCommit bool
+ isCommitedOrRollbacked bool
+ isAutoClose bool
+
+ // Automatically reset the statement after operations that execute a SQL
+ // query such as Count(), Find(), Get(), ...
+ autoResetStatement bool
+
+ // !nashtsai! storing these beans due to yet committed tx
+ afterInsertBeans map[interface{}]*[]func(interface{})
+ afterUpdateBeans map[interface{}]*[]func(interface{})
+ afterDeleteBeans map[interface{}]*[]func(interface{})
+ // --
+
+ beforeClosures []func(interface{})
+ afterClosures []func(interface{})
+
+ afterProcessors []executedProcessor
+
+ prepareStmt bool
+ stmtCache map[uint32]*core.Stmt //key: hash.Hash32 of (queryStr, len(queryStr))
+
+ // !evalphobia! stored the last executed query on this session
+ //beforeSQLExec func(string, ...interface{})
+ lastSQL string
+ lastSQLArgs []interface{}
+
+ ctx context.Context
+ sessionType sessionType
+}
+
+// Clone copy all the session's content and return a new session
+func (session *Session) Clone() *Session {
+ var sess = *session
+ return &sess
+}
+
+// Init reset the session as the init status.
+func (session *Session) Init() {
+ session.statement.Init()
+ session.statement.Engine = session.engine
+ session.isAutoCommit = true
+ session.isCommitedOrRollbacked = false
+ session.isAutoClose = false
+ session.autoResetStatement = true
+ session.prepareStmt = false
+
+ // !nashtsai! is lazy init better?
+ session.afterInsertBeans = make(map[interface{}]*[]func(interface{}), 0)
+ session.afterUpdateBeans = make(map[interface{}]*[]func(interface{}), 0)
+ session.afterDeleteBeans = make(map[interface{}]*[]func(interface{}), 0)
+ session.beforeClosures = make([]func(interface{}), 0)
+ session.afterClosures = make([]func(interface{}), 0)
+ session.stmtCache = make(map[uint32]*core.Stmt)
+
+ session.afterProcessors = make([]executedProcessor, 0)
+
+ session.lastSQL = ""
+ session.lastSQLArgs = []interface{}{}
+
+ session.ctx = session.engine.defaultContext
+}
+
+// Close release the connection from pool. All cached prepared statements
+// are closed first; an uncommitted transaction is rolled back before the
+// session is detached from its DB.
+func (session *Session) Close() {
+	for _, v := range session.stmtCache {
+		v.Close()
+	}
+
+	if session.db != nil {
+		// When Close be called, if session is a transaction and do not call
+		// Commit or Rollback, then call Rollback.
+		if session.tx != nil && !session.isCommitedOrRollbacked {
+			session.Rollback()
+		}
+		session.tx = nil
+		session.stmtCache = nil
+		session.db = nil
+	}
+}
+
+// ContextCache enable context cache or not
+func (session *Session) ContextCache(context ContextCache) *Session {
+ session.statement.context = context
+ return session
+}
+
+// IsClosed returns if session is closed
+func (session *Session) IsClosed() bool {
+ return session.db == nil
+}
+
+func (session *Session) resetStatement() {
+ if session.autoResetStatement {
+ session.statement.Init()
+ }
+}
+
+// Prepare set a flag to session that should be prepare statement before execute query
+func (session *Session) Prepare() *Session {
+ session.prepareStmt = true
+ return session
+}
+
+// Before Apply before Processor, affected bean is passed to closure arg
+func (session *Session) Before(closures func(interface{})) *Session {
+ if closures != nil {
+ session.beforeClosures = append(session.beforeClosures, closures)
+ }
+ return session
+}
+
+// After Apply after Processor, affected bean is passed to closure arg
+func (session *Session) After(closures func(interface{})) *Session {
+ if closures != nil {
+ session.afterClosures = append(session.afterClosures, closures)
+ }
+ return session
+}
+
+// Table can input a string or pointer to struct for special a table to operate.
+func (session *Session) Table(tableNameOrBean interface{}) *Session {
+ session.statement.Table(tableNameOrBean)
+ return session
+}
+
+// Alias set the table alias
+func (session *Session) Alias(alias string) *Session {
+ session.statement.Alias(alias)
+ return session
+}
+
+// NoCascade indicate that no cascade load child object
+func (session *Session) NoCascade() *Session {
+ session.statement.UseCascade = false
+ return session
+}
+
+// ForUpdate Set Read/Write locking for UPDATE
+func (session *Session) ForUpdate() *Session {
+ session.statement.IsForUpdate = true
+ return session
+}
+
+// NoAutoCondition disable generate SQL condition from beans
+func (session *Session) NoAutoCondition(no ...bool) *Session {
+ session.statement.NoAutoCondition(no...)
+ return session
+}
+
+// Limit provide limit and offset query condition
+func (session *Session) Limit(limit int, start ...int) *Session {
+ session.statement.Limit(limit, start...)
+ return session
+}
+
+// OrderBy provide order by query condition, the input parameter is the content
+// after order by on a sql statement.
+func (session *Session) OrderBy(order string) *Session {
+ session.statement.OrderBy(order)
+ return session
+}
+
+// Desc provide desc order by query condition, the input parameters are columns.
+func (session *Session) Desc(colNames ...string) *Session {
+ session.statement.Desc(colNames...)
+ return session
+}
+
+// Asc provide asc order by query condition, the input parameters are columns.
+func (session *Session) Asc(colNames ...string) *Session {
+ session.statement.Asc(colNames...)
+ return session
+}
+
+// StoreEngine is only available for the mysql dialect currently
+func (session *Session) StoreEngine(storeEngine string) *Session {
+	session.statement.StoreEngine = storeEngine
+	return session
+}
+
+// Charset is only available for the mysql dialect currently
+func (session *Session) Charset(charset string) *Session {
+	session.statement.Charset = charset
+	return session
+}
+
+// Cascade indicates if loading sub Struct
+func (session *Session) Cascade(trueOrFalse ...bool) *Session {
+ if len(trueOrFalse) >= 1 {
+ session.statement.UseCascade = trueOrFalse[0]
+ }
+ return session
+}
+
+// NoCache ask this session do not retrieve data from cache system and
+// get data from database directly.
+func (session *Session) NoCache() *Session {
+ session.statement.UseCache = false
+ return session
+}
+
+// Join join_operator should be one of INNER, LEFT OUTER, CROSS etc - this will be prepended to JOIN
+func (session *Session) Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session {
+ session.statement.Join(joinOperator, tablename, condition, args...)
+ return session
+}
+
+// GroupBy Generate Group By statement
+func (session *Session) GroupBy(keys string) *Session {
+ session.statement.GroupBy(keys)
+ return session
+}
+
+// Having Generate Having statement
+func (session *Session) Having(conditions string) *Session {
+ session.statement.Having(conditions)
+ return session
+}
+
+// DB db return the wrapper of sql.DB
+func (session *Session) DB() *core.DB {
+ if session.db == nil {
+ session.db = session.engine.db
+ session.stmtCache = make(map[uint32]*core.Stmt, 0)
+ }
+ return session.db
+}
+
+// cleanupProcessorsClosures resets the given closure slice if it is non-empty.
+func cleanupProcessorsClosures(slices *[]func(interface{})) {
+	if len(*slices) > 0 {
+		*slices = make([]func(interface{}), 0)
+	}
+}
+
+// canCache reports whether the current statement may use the cache system.
+// Caching is disabled for joins, raw SQL, FOR UPDATE, transactions, custom
+// selects, or when the statement has no mapped table.
+func (session *Session) canCache() bool {
+	if session.statement.RefTable == nil ||
+		session.statement.JoinStr != "" ||
+		session.statement.RawSQL != "" ||
+		!session.statement.UseCache ||
+		session.statement.IsForUpdate ||
+		session.tx != nil ||
+		len(session.statement.selectStr) > 0 {
+		return false
+	}
+	return true
+}
+
+// doPrepare returns a prepared statement for sqlStr, caching it in
+// session.stmtCache keyed by the CRC32 of the SQL text.
+// NOTE(review): distinct SQL strings with colliding CRC32 values would
+// share a cache slot (see TODO below) — presumably acceptable in practice,
+// but worth confirming.
+func (session *Session) doPrepare(db *core.DB, sqlStr string) (stmt *core.Stmt, err error) {
+	crc := crc32.ChecksumIEEE([]byte(sqlStr))
+	// TODO try hash(sqlStr+len(sqlStr))
+	var has bool
+	stmt, has = session.stmtCache[crc]
+	if !has {
+		stmt, err = db.PrepareContext(session.ctx, sqlStr)
+		if err != nil {
+			return nil, err
+		}
+		session.stmtCache[crc] = stmt
+	}
+	return
+}
+
+// getField resolves the struct field backing column key (its idx-th
+// duplicate occurrence) of table within dataStruct, returning an error when
+// the column is unknown or the field cannot be set.
+func (session *Session) getField(dataStruct *reflect.Value, key string, table *core.Table, idx int) (*reflect.Value, error) {
+	var col *core.Column
+	if col = table.GetColumnIdx(key, idx); col == nil {
+		return nil, ErrFieldIsNotExist{key, table.Name}
+	}
+
+	fieldValue, err := col.ValueOfV(dataStruct)
+	if err != nil {
+		return nil, err
+	}
+
+	if !fieldValue.IsValid() || !fieldValue.CanSet() {
+		return nil, ErrFieldIsNotValid{key, table.Name}
+	}
+
+	return fieldValue, nil
+}
+
+// Cell cell is a result of one column field
+type Cell *interface{}
+
+// rows2Beans drains rows, materialising one new bean per record via
+// newElemFunc, populating it with row2Slice/slice2Bean, and queueing an
+// after-processor that hands the bean to sliceValueSetFunc when the queued
+// processors are executed.
+func (session *Session) rows2Beans(rows *core.Rows, fields []string,
+	table *core.Table, newElemFunc func([]string) reflect.Value,
+	sliceValueSetFunc func(*reflect.Value, core.PK) error) error {
+	for rows.Next() {
+		var newValue = newElemFunc(fields)
+		bean := newValue.Interface()
+		dataStruct := newValue.Elem()
+
+		// handle beforeClosures
+		scanResults, err := session.row2Slice(rows, fields, bean)
+		if err != nil {
+			return err
+		}
+		pk, err := session.slice2Bean(scanResults, fields, bean, &dataStruct, table)
+		if err != nil {
+			return err
+		}
+		session.afterProcessors = append(session.afterProcessors, executedProcessor{
+			fun: func(*Session, interface{}) error {
+				return sliceValueSetFunc(&newValue, pk)
+			},
+			session: session,
+			bean:    bean,
+		})
+	}
+	return nil
+}
+
+// row2Slice runs the registered before-closures, scans the current row into
+// a slice of *interface{} cells (one per column), and fires the bean's
+// BeforeSet hook for every column if implemented.
+func (session *Session) row2Slice(rows *core.Rows, fields []string, bean interface{}) ([]interface{}, error) {
+	for _, closure := range session.beforeClosures {
+		closure(bean)
+	}
+
+	scanResults := make([]interface{}, len(fields))
+	for i := 0; i < len(fields); i++ {
+		var cell interface{}
+		scanResults[i] = &cell
+	}
+	if err := rows.Scan(scanResults...); err != nil {
+		return nil, err
+	}
+
+	if b, hasBeforeSet := bean.(BeforeSetProcessor); hasBeforeSet {
+		for ii, key := range fields {
+			b.BeforeSet(key, Cell(scanResults[ii].(*interface{})))
+		}
+	}
+	return scanResults, nil
+}
+
+func (session *Session) slice2Bean(scanResults []interface{}, fields []string, bean interface{}, dataStruct *reflect.Value, table *core.Table) (core.PK, error) {
+ defer func() {
+ if b, hasAfterSet := bean.(AfterSetProcessor); hasAfterSet {
+ for ii, key := range fields {
+ b.AfterSet(key, Cell(scanResults[ii].(*interface{})))
+ }
+ }
+ }()
+
+ // handle afterClosures
+ for _, closure := range session.afterClosures {
+ session.afterProcessors = append(session.afterProcessors, executedProcessor{
+ fun: func(sess *Session, bean interface{}) error {
+ closure(bean)
+ return nil
+ },
+ session: session,
+ bean: bean,
+ })
+ }
+
+ if a, has := bean.(AfterLoadProcessor); has {
+ session.afterProcessors = append(session.afterProcessors, executedProcessor{
+ fun: func(sess *Session, bean interface{}) error {
+ a.AfterLoad()
+ return nil
+ },
+ session: session,
+ bean: bean,
+ })
+ }
+
+ if a, has := bean.(AfterLoadSessionProcessor); has {
+ session.afterProcessors = append(session.afterProcessors, executedProcessor{
+ fun: func(sess *Session, bean interface{}) error {
+ a.AfterLoad(sess)
+ return nil
+ },
+ session: session,
+ bean: bean,
+ })
+ }
+
+ var tempMap = make(map[string]int)
+ var pk core.PK
+ for ii, key := range fields {
+ var idx int
+ var ok bool
+ var lKey = strings.ToLower(key)
+ if idx, ok = tempMap[lKey]; !ok {
+ idx = 0
+ } else {
+ idx = idx + 1
+ }
+ tempMap[lKey] = idx
+
+ fieldValue, err := session.getField(dataStruct, key, table, idx)
+ if err != nil {
+ if !strings.Contains(err.Error(), "is not valid") {
+ session.engine.logger.Warn(err)
+ }
+ continue
+ }
+ if fieldValue == nil {
+ continue
+ }
+ rawValue := reflect.Indirect(reflect.ValueOf(scanResults[ii]))
+
+ // if row is null then ignore
+ if rawValue.Interface() == nil {
+ continue
+ }
+
+ if fieldValue.CanAddr() {
+ if structConvert, ok := fieldValue.Addr().Interface().(core.Conversion); ok {
+ if data, err := value2Bytes(&rawValue); err == nil {
+ if err := structConvert.FromDB(data); err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, err
+ }
+ continue
+ }
+ }
+
+ if _, ok := fieldValue.Interface().(core.Conversion); ok {
+ if data, err := value2Bytes(&rawValue); err == nil {
+ if fieldValue.Kind() == reflect.Ptr && fieldValue.IsNil() {
+ fieldValue.Set(reflect.New(fieldValue.Type().Elem()))
+ }
+ fieldValue.Interface().(core.Conversion).FromDB(data)
+ } else {
+ return nil, err
+ }
+ continue
+ }
+
+ rawValueType := reflect.TypeOf(rawValue.Interface())
+ vv := reflect.ValueOf(rawValue.Interface())
+ col := table.GetColumnIdx(key, idx)
+ if col.IsPrimaryKey {
+ pk = append(pk, rawValue.Interface())
+ }
+ fieldType := fieldValue.Type()
+ hasAssigned := false
+
+ if col.SQLType.IsJson() {
+ var bs []byte
+ if rawValueType.Kind() == reflect.String {
+ bs = []byte(vv.String())
+ } else if rawValueType.ConvertibleTo(core.BytesType) {
+ bs = vv.Bytes()
+ } else {
+ return nil, fmt.Errorf("unsupported database data type: %s %v", key, rawValueType.Kind())
+ }
+
+ hasAssigned = true
+
+ if len(bs) > 0 {
+ if fieldType.Kind() == reflect.String {
+ fieldValue.SetString(string(bs))
+ continue
+ }
+ if fieldValue.CanAddr() {
+ err := DefaultJSONHandler.Unmarshal(bs, fieldValue.Addr().Interface())
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ x := reflect.New(fieldType)
+ err := DefaultJSONHandler.Unmarshal(bs, x.Interface())
+ if err != nil {
+ return nil, err
+ }
+ fieldValue.Set(x.Elem())
+ }
+ }
+
+ continue
+ }
+
+ switch fieldType.Kind() {
+ case reflect.Complex64, reflect.Complex128:
+ // TODO: reimplement this
+ var bs []byte
+ if rawValueType.Kind() == reflect.String {
+ bs = []byte(vv.String())
+ } else if rawValueType.ConvertibleTo(core.BytesType) {
+ bs = vv.Bytes()
+ }
+
+ hasAssigned = true
+ if len(bs) > 0 {
+ if fieldValue.CanAddr() {
+ err := DefaultJSONHandler.Unmarshal(bs, fieldValue.Addr().Interface())
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ x := reflect.New(fieldType)
+ err := DefaultJSONHandler.Unmarshal(bs, x.Interface())
+ if err != nil {
+ return nil, err
+ }
+ fieldValue.Set(x.Elem())
+ }
+ }
+ case reflect.Slice, reflect.Array:
+ switch rawValueType.Kind() {
+ case reflect.Slice, reflect.Array:
+ switch rawValueType.Elem().Kind() {
+ case reflect.Uint8:
+ if fieldType.Elem().Kind() == reflect.Uint8 {
+ hasAssigned = true
+ if col.SQLType.IsText() {
+ x := reflect.New(fieldType)
+ err := DefaultJSONHandler.Unmarshal(vv.Bytes(), x.Interface())
+ if err != nil {
+ return nil, err
+ }
+ fieldValue.Set(x.Elem())
+ } else {
+ if fieldValue.Len() > 0 {
+ for i := 0; i < fieldValue.Len(); i++ {
+ if i < vv.Len() {
+ fieldValue.Index(i).Set(vv.Index(i))
+ }
+ }
+ } else {
+ for i := 0; i < vv.Len(); i++ {
+ fieldValue.Set(reflect.Append(*fieldValue, vv.Index(i)))
+ }
+ }
+ }
+ }
+ }
+ }
+ case reflect.String:
+ if rawValueType.Kind() == reflect.String {
+ hasAssigned = true
+ fieldValue.SetString(vv.String())
+ }
+ case reflect.Bool:
+ if rawValueType.Kind() == reflect.Bool {
+ hasAssigned = true
+ fieldValue.SetBool(vv.Bool())
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch rawValueType.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ hasAssigned = true
+ fieldValue.SetInt(vv.Int())
+ }
+ case reflect.Float32, reflect.Float64:
+ switch rawValueType.Kind() {
+ case reflect.Float32, reflect.Float64:
+ hasAssigned = true
+ fieldValue.SetFloat(vv.Float())
+ }
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ switch rawValueType.Kind() {
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ hasAssigned = true
+ fieldValue.SetUint(vv.Uint())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ hasAssigned = true
+ fieldValue.SetUint(uint64(vv.Int()))
+ }
+ case reflect.Struct:
+ if fieldType.ConvertibleTo(core.TimeType) {
+ dbTZ := session.engine.DatabaseTZ
+ if col.TimeZone != nil {
+ dbTZ = col.TimeZone
+ }
+
+ if rawValueType == core.TimeType {
+ hasAssigned = true
+
+ t := vv.Convert(core.TimeType).Interface().(time.Time)
+
+ z, _ := t.Zone()
+ // set new location if database don't save timezone or give an incorrect timezone
+ if len(z) == 0 || t.Year() == 0 || t.Location().String() != dbTZ.String() { // !nashtsai! HACK tmp work around for lib/pq doesn't properly time with location
+ session.engine.logger.Debugf("empty zone key[%v] : %v | zone: %v | location: %+v\n", key, t, z, *t.Location())
+ t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(),
+ t.Minute(), t.Second(), t.Nanosecond(), dbTZ)
+ }
+
+ t = t.In(session.engine.TZLocation)
+ fieldValue.Set(reflect.ValueOf(t).Convert(fieldType))
+ } else if rawValueType == core.IntType || rawValueType == core.Int64Type ||
+ rawValueType == core.Int32Type {
+ hasAssigned = true
+
+ t := time.Unix(vv.Int(), 0).In(session.engine.TZLocation)
+ fieldValue.Set(reflect.ValueOf(t).Convert(fieldType))
+ } else {
+ if d, ok := vv.Interface().([]uint8); ok {
+ hasAssigned = true
+ t, err := session.byte2Time(col, d)
+ if err != nil {
+ session.engine.logger.Error("byte2Time error:", err.Error())
+ hasAssigned = false
+ } else {
+ fieldValue.Set(reflect.ValueOf(t).Convert(fieldType))
+ }
+ } else if d, ok := vv.Interface().(string); ok {
+ hasAssigned = true
+ t, err := session.str2Time(col, d)
+ if err != nil {
+ session.engine.logger.Error("byte2Time error:", err.Error())
+ hasAssigned = false
+ } else {
+ fieldValue.Set(reflect.ValueOf(t).Convert(fieldType))
+ }
+ } else {
+ return nil, fmt.Errorf("rawValueType is %v, value is %v", rawValueType, vv.Interface())
+ }
+ }
+ } else if nulVal, ok := fieldValue.Addr().Interface().(sql.Scanner); ok {
+ // !! 增加支持sql.Scanner接口的结构,如sql.NullString
+ hasAssigned = true
+ if err := nulVal.Scan(vv.Interface()); err != nil {
+ session.engine.logger.Error("sql.Sanner error:", err.Error())
+ hasAssigned = false
+ }
+ } else if col.SQLType.IsJson() {
+ if rawValueType.Kind() == reflect.String {
+ hasAssigned = true
+ x := reflect.New(fieldType)
+ if len([]byte(vv.String())) > 0 {
+ err := DefaultJSONHandler.Unmarshal([]byte(vv.String()), x.Interface())
+ if err != nil {
+ return nil, err
+ }
+ fieldValue.Set(x.Elem())
+ }
+ } else if rawValueType.Kind() == reflect.Slice {
+ hasAssigned = true
+ x := reflect.New(fieldType)
+ if len(vv.Bytes()) > 0 {
+ err := DefaultJSONHandler.Unmarshal(vv.Bytes(), x.Interface())
+ if err != nil {
+ return nil, err
+ }
+ fieldValue.Set(x.Elem())
+ }
+ }
+ } else if session.statement.UseCascade {
+ table, err := session.engine.autoMapType(*fieldValue)
+ if err != nil {
+ return nil, err
+ }
+
+ hasAssigned = true
+ if len(table.PrimaryKeys) != 1 {
+ return nil, errors.New("unsupported non or composited primary key cascade")
+ }
+ var pk = make(core.PK, len(table.PrimaryKeys))
+ pk[0], err = asKind(vv, rawValueType)
+ if err != nil {
+ return nil, err
+ }
+
+ if !isPKZero(pk) {
+ // !nashtsai! TODO for hasOne relationship, it's preferred to use join query for eager fetch
+ // however, also need to consider adding a 'lazy' attribute to xorm tag which allow hasOne
+ // property to be fetched lazily
+ structInter := reflect.New(fieldValue.Type())
+ has, err := session.ID(pk).NoCascade().get(structInter.Interface())
+ if err != nil {
+ return nil, err
+ }
+ if has {
+ fieldValue.Set(structInter.Elem())
+ } else {
+ return nil, errors.New("cascade obj is not exist")
+ }
+ }
+ }
+ case reflect.Ptr:
+ // !nashtsai! TODO merge duplicated codes above
+ switch fieldType {
+ // following types case matching ptr's native type, therefore assign ptr directly
+ case core.PtrStringType:
+ if rawValueType.Kind() == reflect.String {
+ x := vv.String()
+ hasAssigned = true
+ fieldValue.Set(reflect.ValueOf(&x))
+ }
+ case core.PtrBoolType:
+ if rawValueType.Kind() == reflect.Bool {
+ x := vv.Bool()
+ hasAssigned = true
+ fieldValue.Set(reflect.ValueOf(&x))
+ }
+ case core.PtrTimeType:
+ if rawValueType == core.PtrTimeType {
+ hasAssigned = true
+ var x = rawValue.Interface().(time.Time)
+ fieldValue.Set(reflect.ValueOf(&x))
+ }
+ case core.PtrFloat64Type:
+ if rawValueType.Kind() == reflect.Float64 {
+ x := vv.Float()
+ hasAssigned = true
+ fieldValue.Set(reflect.ValueOf(&x))
+ }
+ case core.PtrUint64Type:
+ if rawValueType.Kind() == reflect.Int64 {
+ var x = uint64(vv.Int())
+ hasAssigned = true
+ fieldValue.Set(reflect.ValueOf(&x))
+ }
+ case core.PtrInt64Type:
+ if rawValueType.Kind() == reflect.Int64 {
+ x := vv.Int()
+ hasAssigned = true
+ fieldValue.Set(reflect.ValueOf(&x))
+ }
+ case core.PtrFloat32Type:
+ if rawValueType.Kind() == reflect.Float64 {
+ var x = float32(vv.Float())
+ hasAssigned = true
+ fieldValue.Set(reflect.ValueOf(&x))
+ }
+ case core.PtrIntType:
+ if rawValueType.Kind() == reflect.Int64 {
+ var x = int(vv.Int())
+ hasAssigned = true
+ fieldValue.Set(reflect.ValueOf(&x))
+ }
+ case core.PtrInt32Type:
+ if rawValueType.Kind() == reflect.Int64 {
+ var x = int32(vv.Int())
+ hasAssigned = true
+ fieldValue.Set(reflect.ValueOf(&x))
+ }
+ case core.PtrInt8Type:
+ if rawValueType.Kind() == reflect.Int64 {
+ var x = int8(vv.Int())
+ hasAssigned = true
+ fieldValue.Set(reflect.ValueOf(&x))
+ }
+ case core.PtrInt16Type:
+ if rawValueType.Kind() == reflect.Int64 {
+ var x = int16(vv.Int())
+ hasAssigned = true
+ fieldValue.Set(reflect.ValueOf(&x))
+ }
+ case core.PtrUintType:
+ if rawValueType.Kind() == reflect.Int64 {
+ var x = uint(vv.Int())
+ hasAssigned = true
+ fieldValue.Set(reflect.ValueOf(&x))
+ }
+ case core.PtrUint32Type:
+ if rawValueType.Kind() == reflect.Int64 {
+ var x = uint32(vv.Int())
+ hasAssigned = true
+ fieldValue.Set(reflect.ValueOf(&x))
+ }
+ case core.Uint8Type:
+ if rawValueType.Kind() == reflect.Int64 {
+ var x = uint8(vv.Int())
+ hasAssigned = true
+ fieldValue.Set(reflect.ValueOf(&x))
+ }
+ case core.Uint16Type:
+ if rawValueType.Kind() == reflect.Int64 {
+ var x = uint16(vv.Int())
+ hasAssigned = true
+ fieldValue.Set(reflect.ValueOf(&x))
+ }
+ case core.Complex64Type:
+ var x complex64
+ if len([]byte(vv.String())) > 0 {
+ err := DefaultJSONHandler.Unmarshal([]byte(vv.String()), &x)
+ if err != nil {
+ return nil, err
+ }
+ fieldValue.Set(reflect.ValueOf(&x))
+ }
+ hasAssigned = true
+ case core.Complex128Type:
+ var x complex128
+ if len([]byte(vv.String())) > 0 {
+ err := DefaultJSONHandler.Unmarshal([]byte(vv.String()), &x)
+ if err != nil {
+ return nil, err
+ }
+ fieldValue.Set(reflect.ValueOf(&x))
+ }
+ hasAssigned = true
+ } // switch fieldType
+ } // switch fieldType.Kind()
+
+ // !nashtsai! for value can't be assigned directly fallback to convert to []byte then back to value
+ if !hasAssigned {
+ data, err := value2Bytes(&rawValue)
+ if err != nil {
+ return nil, err
+ }
+
+ if err = session.bytes2Value(col, fieldValue, data); err != nil {
+ return nil, err
+ }
+ }
+ }
+ return pk, nil
+}
+
+// saveLastSQL stores executed query information
+func (session *Session) saveLastSQL(sql string, args ...interface{}) {
+ session.lastSQL = sql
+ session.lastSQLArgs = args
+ session.engine.logSQL(sql, args...)
+}
+
+// LastSQL returns last query information
+func (session *Session) LastSQL() (string, []interface{}) {
+ return session.lastSQL, session.lastSQLArgs
+}
+
+// Unscoped always disable struct tag "deleted"
+func (session *Session) Unscoped() *Session {
+ session.statement.Unscoped()
+ return session
+}
+
+func (session *Session) incrVersionFieldValue(fieldValue *reflect.Value) {
+ switch fieldValue.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ fieldValue.SetInt(fieldValue.Int() + 1)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ fieldValue.SetUint(fieldValue.Uint() + 1)
+ }
+}
diff --git a/vendor/github.com/go-xorm/xorm/session_cols.go b/vendor/github.com/go-xorm/xorm/session_cols.go
new file mode 100644
index 0000000..dc3befc
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/session_cols.go
@@ -0,0 +1,199 @@
+// Copyright 2017 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "reflect"
+ "strings"
+ "time"
+
+ "xorm.io/core"
+)
+
+type incrParam struct {
+ colName string
+ arg interface{}
+}
+
+type decrParam struct {
+ colName string
+ arg interface{}
+}
+
+type exprParam struct {
+ colName string
+ expr string
+}
+
+type columnMap []string
+
+func (m columnMap) contain(colName string) bool {
+ if len(m) == 0 {
+ return false
+ }
+
+ n := len(colName)
+ for _, mk := range m {
+ if len(mk) != n {
+ continue
+ }
+ if strings.EqualFold(mk, colName) {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (m *columnMap) add(colName string) bool {
+ if m.contain(colName) {
+ return false
+ }
+ *m = append(*m, colName)
+ return true
+}
+
+func setColumnInt(bean interface{}, col *core.Column, t int64) {
+ v, err := col.ValueOf(bean)
+ if err != nil {
+ return
+ }
+ if v.CanSet() {
+ switch v.Type().Kind() {
+ case reflect.Int, reflect.Int64, reflect.Int32:
+ v.SetInt(t)
+ case reflect.Uint, reflect.Uint64, reflect.Uint32:
+ v.SetUint(uint64(t))
+ }
+ }
+}
+
+func setColumnTime(bean interface{}, col *core.Column, t time.Time) {
+ v, err := col.ValueOf(bean)
+ if err != nil {
+ return
+ }
+ if v.CanSet() {
+ switch v.Type().Kind() {
+ case reflect.Struct:
+ v.Set(reflect.ValueOf(t).Convert(v.Type()))
+ case reflect.Int, reflect.Int64, reflect.Int32:
+ v.SetInt(t.Unix())
+ case reflect.Uint, reflect.Uint64, reflect.Uint32:
+ v.SetUint(uint64(t.Unix()))
+ }
+ }
+}
+
+func getFlagForColumn(m map[string]bool, col *core.Column) (val bool, has bool) {
+ if len(m) == 0 {
+ return false, false
+ }
+
+ n := len(col.Name)
+
+ for mk := range m {
+ if len(mk) != n {
+ continue
+ }
+ if strings.EqualFold(mk, col.Name) {
+ return m[mk], true
+ }
+ }
+
+ return false, false
+}
+
+func col2NewCols(columns ...string) []string {
+ newColumns := make([]string, 0, len(columns))
+ for _, col := range columns {
+ col = strings.Replace(col, "`", "", -1)
+ col = strings.Replace(col, `"`, "", -1)
+ ccols := strings.Split(col, ",")
+ for _, c := range ccols {
+ newColumns = append(newColumns, strings.TrimSpace(c))
+ }
+ }
+ return newColumns
+}
+
+// Incr provides a query string like "count = count + 1"
+func (session *Session) Incr(column string, arg ...interface{}) *Session {
+ session.statement.Incr(column, arg...)
+ return session
+}
+
+// Decr provides a query string like "count = count - 1"
+func (session *Session) Decr(column string, arg ...interface{}) *Session {
+ session.statement.Decr(column, arg...)
+ return session
+}
+
+// SetExpr provides a query string like "column = {expression}"
+func (session *Session) SetExpr(column string, expression string) *Session {
+ session.statement.SetExpr(column, expression)
+ return session
+}
+
+// Select provides some columns to special
+func (session *Session) Select(str string) *Session {
+ session.statement.Select(str)
+ return session
+}
+
+// Cols provides some columns to special
+func (session *Session) Cols(columns ...string) *Session {
+ session.statement.Cols(columns...)
+ return session
+}
+
+// AllCols ask all columns
+func (session *Session) AllCols() *Session {
+ session.statement.AllCols()
+ return session
+}
+
+// MustCols specify some columns must use even if they are empty
+func (session *Session) MustCols(columns ...string) *Session {
+ session.statement.MustCols(columns...)
+ return session
+}
+
+// UseBool automatically retrieve condition according struct, but
+// if struct has bool field, it will ignore them. So use UseBool
+// to tell system to do not ignore them.
+// If no parameters are given, all bool fields of the struct are used;
+// otherwise only the given columns are used.
+func (session *Session) UseBool(columns ...string) *Session {
+	session.statement.UseBool(columns...)
+	return session
+}
+
+// Distinct use for distinct columns. Caution: when you are using cache,
+// distinct will not be cached because cache system need id,
+// but distinct will not provide id
+func (session *Session) Distinct(columns ...string) *Session {
+ session.statement.Distinct(columns...)
+ return session
+}
+
+// Omit Only not use the parameters as select or update columns
+func (session *Session) Omit(columns ...string) *Session {
+ session.statement.Omit(columns...)
+ return session
+}
+
+// Nullable Set null when column is zero-value and nullable for update
+func (session *Session) Nullable(columns ...string) *Session {
+ session.statement.Nullable(columns...)
+ return session
+}
+
+// NoAutoTime means do not automatically give created field and updated field
+// the current time on the current session temporarily
+func (session *Session) NoAutoTime() *Session {
+ session.statement.UseAutoTime = false
+ return session
+}
diff --git a/vendor/github.com/go-xorm/xorm/session_cond.go b/vendor/github.com/go-xorm/xorm/session_cond.go
new file mode 100644
index 0000000..b16bdea
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/session_cond.go
@@ -0,0 +1,70 @@
+// Copyright 2017 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import "xorm.io/builder"
+
+// Sql provides a raw SQL input parameter for statements too complex to
+// describe with Where, Id, In and similar methods.
+//
+// Deprecated: use SQL instead.
+func (session *Session) Sql(query string, args ...interface{}) *Session {
+ return session.SQL(query, args...)
+}
+
+// SQL provides a raw SQL input parameter for statements too complex to
+// describe with Where, Id, In and similar methods.
+func (session *Session) SQL(query interface{}, args ...interface{}) *Session {
+ session.statement.SQL(query, args...)
+ return session
+}
+
+// Where adds a custom query condition.
+func (session *Session) Where(query interface{}, args ...interface{}) *Session {
+ session.statement.Where(query, args...)
+ return session
+}
+
+// And appends a condition joined with AND.
+func (session *Session) And(query interface{}, args ...interface{}) *Session {
+ session.statement.And(query, args...)
+ return session
+}
+
+// Or appends a condition joined with OR.
+func (session *Session) Or(query interface{}, args ...interface{}) *Session {
+ session.statement.Or(query, args...)
+ return session
+}
+
+// Id adds the primary key value as a query condition.
+//
+// Deprecated: use ID instead
+func (session *Session) Id(id interface{}) *Session {
+ return session.ID(id)
+}
+
+// ID adds the primary key value as a query condition.
+func (session *Session) ID(id interface{}) *Session {
+ session.statement.ID(id)
+ return session
+}
+
+// In builds a condition like "column IN (1, 2, 3)".
+func (session *Session) In(column string, args ...interface{}) *Session {
+ session.statement.In(column, args...)
+ return session
+}
+
+// NotIn builds a condition like "column NOT IN (1, 2, 3)".
+func (session *Session) NotIn(column string, args ...interface{}) *Session {
+ session.statement.NotIn(column, args...)
+ return session
+}
+
+// Conds returns the session query conditions, excluding auto bean conditions.
+func (session *Session) Conds() builder.Cond {
+ return session.statement.cond
+}
diff --git a/vendor/github.com/go-xorm/xorm/session_context.go b/vendor/github.com/go-xorm/xorm/session_context.go
new file mode 100644
index 0000000..915f056
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/session_context.go
@@ -0,0 +1,23 @@
+// Copyright 2019 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import "context"
+
+// Context sets the context used by this session.
+func (session *Session) Context(ctx context.Context) *Session {
+ session.ctx = ctx
+ return session
+}
+
+// PingContext verifies the database connection is alive, honoring ctx for cancellation.
+func (session *Session) PingContext(ctx context.Context) error {
+ if session.isAutoClose { // auto-created sessions are closed when done
+ defer session.Close()
+ }
+
+ session.engine.logger.Infof("PING DATABASE %v", session.engine.DriverName())
+ return session.DB().PingContext(ctx)
+}
diff --git a/vendor/github.com/go-xorm/xorm/session_convert.go b/vendor/github.com/go-xorm/xorm/session_convert.go
new file mode 100644
index 0000000..caff5d2
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/session_convert.go
@@ -0,0 +1,661 @@
+// Copyright 2017 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "xorm.io/core"
+)
+
+func (session *Session) str2Time(col *core.Column, data string) (outTime time.Time, outErr error) { // parses a DB string into time.Time, honoring column/DB time zones
+ sdata := strings.TrimSpace(data)
+ var x time.Time
+ var err error
+
+ var parseLoc = session.engine.DatabaseTZ
+ if col.TimeZone != nil { // a column-level time zone overrides the engine database TZ
+ parseLoc = col.TimeZone
+ }
+
+ if sdata == zeroTime0 || sdata == zeroTime1 { // zero-time strings: leave x as the zero value
+ } else if !strings.ContainsAny(sdata, "- :") { // !nashtsai! has only found that mymysql driver is using this for time type column
+ // time stamp
+ sd, err := strconv.ParseInt(sdata, 10, 64) // NOTE(review): this err shadows the outer one, so an unparsable stamp silently yields the zero time — confirm intended
+ if err == nil {
+ x = time.Unix(sd, 0)
+ //session.engine.logger.Debugf("time(0) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
+ } else {
+ //session.engine.logger.Debugf("time(0) err key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
+ }
+ } else if len(sdata) > 19 && strings.Contains(sdata, "-") { // datetime with fractional seconds and/or zone
+ x, err = time.ParseInLocation(time.RFC3339Nano, sdata, parseLoc)
+ session.engine.logger.Debugf("time(1) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
+ if err != nil {
+ x, err = time.ParseInLocation("2006-01-02 15:04:05.999999999", sdata, parseLoc)
+ //session.engine.logger.Debugf("time(2) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
+ }
+ if err != nil {
+ x, err = time.ParseInLocation("2006-01-02 15:04:05.9999999 Z07:00", sdata, parseLoc)
+ //session.engine.logger.Debugf("time(3) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
+ }
+ } else if len(sdata) == 19 && strings.Contains(sdata, "-") { // plain "YYYY-MM-DD HH:MM:SS"
+ x, err = time.ParseInLocation("2006-01-02 15:04:05", sdata, parseLoc)
+ //session.engine.logger.Debugf("time(4) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
+ } else if len(sdata) == 10 && sdata[4] == '-' && sdata[7] == '-' { // date-only "YYYY-MM-DD"
+ x, err = time.ParseInLocation("2006-01-02", sdata, parseLoc)
+ //session.engine.logger.Debugf("time(5) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
+ } else if col.SQLType.Name == core.Time {
+ if strings.Contains(sdata, " ") { // drop any date portion, keep the time part
+ ssd := strings.Split(sdata, " ")
+ sdata = ssd[1]
+ }
+
+ sdata = strings.TrimSpace(sdata)
+ if session.engine.dialect.DBType() == core.MYSQL && len(sdata) > 8 {
+ sdata = sdata[len(sdata)-8:] // keep only the trailing "HH:MM:SS" portion
+ }
+
+ st := fmt.Sprintf("2006-01-02 %v", sdata) // parse with a dummy date so only the time of day matters
+ x, err = time.ParseInLocation("2006-01-02 15:04:05", st, parseLoc)
+ //session.engine.logger.Debugf("time(6) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
+ } else {
+ outErr = fmt.Errorf("unsupported time format %v", sdata)
+ return
+ }
+ if err != nil {
+ outErr = fmt.Errorf("unsupported time format %v: %v", sdata, err)
+ return
+ }
+ outTime = x.In(session.engine.TZLocation) // convert to the engine's configured display time zone
+ return
+}
+
+func (session *Session) byte2Time(col *core.Column, data []byte) (outTime time.Time, outErr error) { // thin []byte wrapper around str2Time
+ return session.str2Time(col, string(data))
+}
+
+// bytes2Value converts a raw db value ([]byte) into the given struct field.
+func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, data []byte) error {
+ if structConvert, ok := fieldValue.Addr().Interface().(core.Conversion); ok { // custom Conversion (pointer receiver) takes precedence
+ return structConvert.FromDB(data)
+ }
+
+ if structConvert, ok := fieldValue.Interface().(core.Conversion); ok {
+ return structConvert.FromDB(data)
+ }
+
+ var v interface{}
+ key := col.Name
+ fieldType := fieldValue.Type()
+
+ switch fieldType.Kind() {
+ case reflect.Complex64, reflect.Complex128:
+ x := reflect.New(fieldType)
+ if len(data) > 0 {
+ err := DefaultJSONHandler.Unmarshal(data, x.Interface())
+ if err != nil {
+ session.engine.logger.Error(err)
+ return err
+ }
+ fieldValue.Set(x.Elem())
+ }
+ case reflect.Slice, reflect.Array, reflect.Map:
+ v = data
+ t := fieldType.Elem()
+ k := t.Kind()
+ if col.SQLType.IsText() { // text columns hold JSON-encoded collections
+ x := reflect.New(fieldType)
+ if len(data) > 0 {
+ err := DefaultJSONHandler.Unmarshal(data, x.Interface())
+ if err != nil {
+ session.engine.logger.Error(err)
+ return err
+ }
+ fieldValue.Set(x.Elem())
+ }
+ } else if col.SQLType.IsBlob() {
+ if k == reflect.Uint8 { // []byte fields take the raw blob directly
+ fieldValue.Set(reflect.ValueOf(v))
+ } else {
+ x := reflect.New(fieldType)
+ if len(data) > 0 {
+ err := DefaultJSONHandler.Unmarshal(data, x.Interface())
+ if err != nil {
+ session.engine.logger.Error(err)
+ return err
+ }
+ fieldValue.Set(x.Elem())
+ }
+ }
+ } else {
+ return ErrUnSupportedType
+ }
+ case reflect.String:
+ fieldValue.SetString(string(data))
+ case reflect.Bool:
+ v, err := asBool(data)
+ if err != nil {
+ return fmt.Errorf("arg %v as bool: %s", key, err.Error())
+ }
+ fieldValue.Set(reflect.ValueOf(v))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ sdata := string(data)
+ var x int64
+ var err error
+ // for mysql, when use bit, it returned \x01
+ if col.SQLType.Name == core.Bit &&
+ session.engine.dialect.DBType() == core.MYSQL { // !nashtsai! TODO dialect needs to provide conversion interface API
+ if len(data) == 1 {
+ x = int64(data[0])
+ } else {
+ x = 0
+ }
+ } else if strings.HasPrefix(sdata, "0x") {
+ x, err = strconv.ParseInt(sdata, 16, 64)
+ } else if strings.HasPrefix(sdata, "0") { // NOTE(review): a leading "0" forces octal — e.g. "09" fails to parse; confirm intended
+ x, err = strconv.ParseInt(sdata, 8, 64)
+ } else if strings.EqualFold(sdata, "true") {
+ x = 1
+ } else if strings.EqualFold(sdata, "false") {
+ x = 0
+ } else {
+ x, err = strconv.ParseInt(sdata, 10, 64)
+ }
+ if err != nil {
+ return fmt.Errorf("arg %v as int: %s", key, err.Error())
+ }
+ fieldValue.SetInt(x)
+ case reflect.Float32, reflect.Float64:
+ x, err := strconv.ParseFloat(string(data), 64)
+ if err != nil {
+ return fmt.Errorf("arg %v as float64: %s", key, err.Error())
+ }
+ fieldValue.SetFloat(x)
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ x, err := strconv.ParseUint(string(data), 10, 64)
+ if err != nil {
+ return fmt.Errorf("arg %v as int: %s", key, err.Error())
+ }
+ fieldValue.SetUint(x)
+ // struct fields: sql.Scanner, time.Time-convertible, or cascaded beans
+ case reflect.Struct:
+ // !! also support structs implementing sql.Scanner, e.g. sql.NullString
+ if nulVal, ok := fieldValue.Addr().Interface().(sql.Scanner); ok {
+ if err := nulVal.Scan(data); err != nil {
+ return fmt.Errorf("sql.Scan(%v) failed: %s ", data, err.Error())
+ }
+ } else {
+ if fieldType.ConvertibleTo(core.TimeType) {
+ x, err := session.byte2Time(col, data)
+ if err != nil {
+ return err
+ }
+ v = x
+ fieldValue.Set(reflect.ValueOf(v).Convert(fieldType))
+ } else if session.statement.UseCascade {
+ table, err := session.engine.autoMapType(*fieldValue)
+ if err != nil {
+ return err
+ }
+
+ // TODO: current only support 1 primary key
+ if len(table.PrimaryKeys) > 1 {
+ return errors.New("unsupported composited primary key cascade")
+ }
+
+ var pk = make(core.PK, len(table.PrimaryKeys))
+ rawValueType := table.ColumnType(table.PKColumns()[0].FieldName)
+ pk[0], err = str2PK(string(data), rawValueType)
+ if err != nil {
+ return err
+ }
+
+ if !isPKZero(pk) {
+ // !nashtsai! TODO for hasOne relationship, it's preferred to use join query for eager fetch
+ // however, also need to consider adding a 'lazy' attribute to xorm tag which allow hasOne
+ // property to be fetched lazily
+ structInter := reflect.New(fieldValue.Type())
+ has, err := session.ID(pk).NoCascade().get(structInter.Interface())
+ if err != nil {
+ return err
+ }
+ if has {
+ v = structInter.Elem().Interface()
+ fieldValue.Set(reflect.ValueOf(v))
+ } else {
+ return errors.New("cascade obj is not exist")
+ }
+ }
+ }
+ }
+ case reflect.Ptr:
+ // !nashtsai! TODO merge duplicated codes above
+ //typeStr := fieldType.String()
+ switch fieldType.Elem().Kind() {
+ // case "*string":
+ case core.StringType.Kind():
+ x := string(data)
+ fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType))
+ // case "*bool":
+ case core.BoolType.Kind():
+ d := string(data)
+ v, err := strconv.ParseBool(d)
+ if err != nil {
+ return fmt.Errorf("arg %v as bool: %s", key, err.Error())
+ }
+ fieldValue.Set(reflect.ValueOf(&v).Convert(fieldType))
+ // case "*complex64":
+ case core.Complex64Type.Kind():
+ var x complex64
+ if len(data) > 0 {
+ err := DefaultJSONHandler.Unmarshal(data, &x)
+ if err != nil {
+ session.engine.logger.Error(err)
+ return err
+ }
+ fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType))
+ }
+ // case "*complex128":
+ case core.Complex128Type.Kind():
+ var x complex128
+ if len(data) > 0 {
+ err := DefaultJSONHandler.Unmarshal(data, &x)
+ if err != nil {
+ session.engine.logger.Error(err)
+ return err
+ }
+ fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType))
+ }
+ // case "*float64":
+ case core.Float64Type.Kind():
+ x, err := strconv.ParseFloat(string(data), 64)
+ if err != nil {
+ return fmt.Errorf("arg %v as float64: %s", key, err.Error())
+ }
+ fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType))
+ // case "*float32":
+ case core.Float32Type.Kind():
+ var x float32
+ x1, err := strconv.ParseFloat(string(data), 32)
+ if err != nil {
+ return fmt.Errorf("arg %v as float32: %s", key, err.Error())
+ }
+ x = float32(x1)
+ fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType))
+ // case "*uint64":
+ case core.Uint64Type.Kind():
+ var x uint64
+ x, err := strconv.ParseUint(string(data), 10, 64)
+ if err != nil {
+ return fmt.Errorf("arg %v as int: %s", key, err.Error())
+ }
+ fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType))
+ // case "*uint":
+ case core.UintType.Kind():
+ var x uint
+ x1, err := strconv.ParseUint(string(data), 10, 64)
+ if err != nil {
+ return fmt.Errorf("arg %v as int: %s", key, err.Error())
+ }
+ x = uint(x1)
+ fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType))
+ // case "*uint32":
+ case core.Uint32Type.Kind():
+ var x uint32
+ x1, err := strconv.ParseUint(string(data), 10, 64)
+ if err != nil {
+ return fmt.Errorf("arg %v as int: %s", key, err.Error())
+ }
+ x = uint32(x1)
+ fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType))
+ // case "*uint8":
+ case core.Uint8Type.Kind():
+ var x uint8
+ x1, err := strconv.ParseUint(string(data), 10, 64)
+ if err != nil {
+ return fmt.Errorf("arg %v as int: %s", key, err.Error())
+ }
+ x = uint8(x1)
+ fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType))
+ // case "*uint16":
+ case core.Uint16Type.Kind():
+ var x uint16
+ x1, err := strconv.ParseUint(string(data), 10, 64)
+ if err != nil {
+ return fmt.Errorf("arg %v as int: %s", key, err.Error())
+ }
+ x = uint16(x1)
+ fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType))
+ // case "*int64":
+ case core.Int64Type.Kind():
+ sdata := string(data)
+ var x int64
+ var err error
+ // for mysql, when use bit, it returned \x01
+ if col.SQLType.Name == core.Bit &&
+ strings.Contains(session.engine.DriverName(), "mysql") {
+ if len(data) == 1 {
+ x = int64(data[0])
+ } else {
+ x = 0
+ }
+ } else if strings.HasPrefix(sdata, "0x") {
+ x, err = strconv.ParseInt(sdata, 16, 64)
+ } else if strings.HasPrefix(sdata, "0") {
+ x, err = strconv.ParseInt(sdata, 8, 64)
+ } else {
+ x, err = strconv.ParseInt(sdata, 10, 64)
+ }
+ if err != nil {
+ return fmt.Errorf("arg %v as int: %s", key, err.Error())
+ }
+ fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType))
+ // case "*int":
+ case core.IntType.Kind():
+ sdata := string(data)
+ var x int
+ var x1 int64
+ var err error
+ // for mysql, when use bit, it returned \x01
+ if col.SQLType.Name == core.Bit &&
+ strings.Contains(session.engine.DriverName(), "mysql") {
+ if len(data) == 1 {
+ x = int(data[0])
+ } else {
+ x = 0
+ }
+ } else if strings.HasPrefix(sdata, "0x") {
+ x1, err = strconv.ParseInt(sdata, 16, 64)
+ x = int(x1)
+ } else if strings.HasPrefix(sdata, "0") {
+ x1, err = strconv.ParseInt(sdata, 8, 64)
+ x = int(x1)
+ } else {
+ x1, err = strconv.ParseInt(sdata, 10, 64)
+ x = int(x1)
+ }
+ if err != nil {
+ return fmt.Errorf("arg %v as int: %s", key, err.Error())
+ }
+ fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType))
+ // case "*int32":
+ case core.Int32Type.Kind():
+ sdata := string(data)
+ var x int32
+ var x1 int64
+ var err error
+ // for mysql, when use bit, it returned \x01
+ if col.SQLType.Name == core.Bit &&
+ session.engine.dialect.DBType() == core.MYSQL {
+ if len(data) == 1 {
+ x = int32(data[0])
+ } else {
+ x = 0
+ }
+ } else if strings.HasPrefix(sdata, "0x") {
+ x1, err = strconv.ParseInt(sdata, 16, 64)
+ x = int32(x1)
+ } else if strings.HasPrefix(sdata, "0") {
+ x1, err = strconv.ParseInt(sdata, 8, 64)
+ x = int32(x1)
+ } else {
+ x1, err = strconv.ParseInt(sdata, 10, 64)
+ x = int32(x1)
+ }
+ if err != nil {
+ return fmt.Errorf("arg %v as int: %s", key, err.Error())
+ }
+ fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType))
+ // case "*int8":
+ case core.Int8Type.Kind():
+ sdata := string(data)
+ var x int8
+ var x1 int64
+ var err error
+ // for mysql, when use bit, it returned \x01
+ if col.SQLType.Name == core.Bit &&
+ strings.Contains(session.engine.DriverName(), "mysql") {
+ if len(data) == 1 {
+ x = int8(data[0])
+ } else {
+ x = 0
+ }
+ } else if strings.HasPrefix(sdata, "0x") {
+ x1, err = strconv.ParseInt(sdata, 16, 64)
+ x = int8(x1)
+ } else if strings.HasPrefix(sdata, "0") {
+ x1, err = strconv.ParseInt(sdata, 8, 64)
+ x = int8(x1)
+ } else {
+ x1, err = strconv.ParseInt(sdata, 10, 64)
+ x = int8(x1)
+ }
+ if err != nil {
+ return fmt.Errorf("arg %v as int: %s", key, err.Error())
+ }
+ fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType))
+ // case "*int16":
+ case core.Int16Type.Kind():
+ sdata := string(data)
+ var x int16
+ var x1 int64
+ var err error
+ // for mysql, when use bit, it returned \x01
+ if col.SQLType.Name == core.Bit &&
+ strings.Contains(session.engine.DriverName(), "mysql") {
+ if len(data) == 1 {
+ x = int16(data[0])
+ } else {
+ x = 0
+ }
+ } else if strings.HasPrefix(sdata, "0x") {
+ x1, err = strconv.ParseInt(sdata, 16, 64)
+ x = int16(x1)
+ } else if strings.HasPrefix(sdata, "0") {
+ x1, err = strconv.ParseInt(sdata, 8, 64)
+ x = int16(x1)
+ } else {
+ x1, err = strconv.ParseInt(sdata, 10, 64)
+ x = int16(x1)
+ }
+ if err != nil {
+ return fmt.Errorf("arg %v as int: %s", key, err.Error())
+ }
+ fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType))
+ // case "*SomeStruct":
+ case reflect.Struct:
+ switch fieldType {
+ // case "*.time.Time":
+ case core.PtrTimeType:
+ x, err := session.byte2Time(col, data)
+ if err != nil {
+ return err
+ }
+ v = x
+ fieldValue.Set(reflect.ValueOf(&x))
+ default:
+ if session.statement.UseCascade { // fetch the referenced bean by its primary key
+ structInter := reflect.New(fieldType.Elem())
+ table, err := session.engine.autoMapType(structInter.Elem())
+ if err != nil {
+ return err
+ }
+
+ if len(table.PrimaryKeys) > 1 {
+ return errors.New("unsupported composited primary key cascade")
+ }
+
+ var pk = make(core.PK, len(table.PrimaryKeys))
+ rawValueType := table.ColumnType(table.PKColumns()[0].FieldName)
+ pk[0], err = str2PK(string(data), rawValueType)
+ if err != nil {
+ return err
+ }
+
+ if !isPKZero(pk) {
+ // !nashtsai! TODO for hasOne relationship, it's preferred to use join query for eager fetch
+ // however, also need to consider adding a 'lazy' attribute to xorm tag which allow hasOne
+ // property to be fetched lazily
+ has, err := session.ID(pk).NoCascade().get(structInter.Interface())
+ if err != nil {
+ return err
+ }
+ if has {
+ v = structInter.Interface()
+ fieldValue.Set(reflect.ValueOf(v))
+ } else {
+ return errors.New("cascade obj is not exist")
+ }
+ }
+ } else {
+ return fmt.Errorf("unsupported struct type in Scan: %s", fieldValue.Type().String())
+ }
+ }
+ default:
+ return fmt.Errorf("unsupported type in Scan: %s", fieldValue.Type().String())
+ }
+ default:
+ return fmt.Errorf("unsupported type in Scan: %s", fieldValue.Type().String())
+ }
+
+ return nil
+}
+
+// value2Interface converts a struct field value into an interface{} suitable for the db driver.
+func (session *Session) value2Interface(col *core.Column, fieldValue reflect.Value) (interface{}, error) {
+ if fieldValue.CanAddr() { // custom Conversion (pointer receiver) takes precedence
+ if fieldConvert, ok := fieldValue.Addr().Interface().(core.Conversion); ok {
+ data, err := fieldConvert.ToDB()
+ if err != nil {
+ return 0, err // NOTE(review): returns 0 (not nil) on error throughout — callers must check err first
+ }
+ if col.SQLType.IsBlob() {
+ return data, nil
+ }
+ return string(data), nil
+ }
+ }
+
+ if fieldConvert, ok := fieldValue.Interface().(core.Conversion); ok {
+ data, err := fieldConvert.ToDB()
+ if err != nil {
+ return 0, err
+ }
+ if col.SQLType.IsBlob() {
+ return data, nil
+ }
+ return string(data), nil
+ }
+
+ fieldType := fieldValue.Type()
+ k := fieldType.Kind()
+ if k == reflect.Ptr {
+ if fieldValue.IsNil() {
+ return nil, nil
+ } else if !fieldValue.IsValid() {
+ session.engine.logger.Warn("the field[", col.FieldName, "] is invalid")
+ return nil, nil
+ } else {
+ // !nashtsai! deference pointer type to instance type
+ fieldValue = fieldValue.Elem()
+ fieldType = fieldValue.Type()
+ k = fieldType.Kind()
+ }
+ }
+
+ switch k {
+ case reflect.Bool:
+ return fieldValue.Bool(), nil
+ case reflect.String:
+ return fieldValue.String(), nil
+ case reflect.Struct:
+ if fieldType.ConvertibleTo(core.TimeType) {
+ t := fieldValue.Convert(core.TimeType).Interface().(time.Time)
+ tf := session.engine.formatColTime(col, t)
+ return tf, nil
+ }
+
+ if !col.SQLType.IsJson() {
+ // !! also support structs implementing driver.Valuer, e.g. sql.NullString
+ if v, ok := fieldValue.Interface().(driver.Valuer); ok {
+ return v.Value()
+ }
+
+ fieldTable, err := session.engine.autoMapType(fieldValue)
+ if err != nil {
+ return nil, err
+ }
+ if len(fieldTable.PrimaryKeys) == 1 { // store the referenced bean as its primary key value
+ pkField := reflect.Indirect(fieldValue).FieldByName(fieldTable.PKColumns()[0].FieldName)
+ return pkField.Interface(), nil
+ }
+ return 0, fmt.Errorf("no primary key for col %v", col.Name)
+ }
+
+ if col.SQLType.IsText() {
+ bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface())
+ if err != nil {
+ session.engine.logger.Error(err)
+ return 0, err
+ }
+ return string(bytes), nil
+ } else if col.SQLType.IsBlob() {
+ bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface())
+ if err != nil {
+ session.engine.logger.Error(err)
+ return 0, err
+ }
+ return bytes, nil
+ }
+ return nil, fmt.Errorf("Unsupported type %v", fieldValue.Type())
+ case reflect.Complex64, reflect.Complex128:
+ bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface())
+ if err != nil {
+ session.engine.logger.Error(err)
+ return 0, err
+ }
+ return string(bytes), nil
+ case reflect.Array, reflect.Slice, reflect.Map:
+ if !fieldValue.IsValid() {
+ return fieldValue.Interface(), nil
+ }
+
+ if col.SQLType.IsText() {
+ bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface())
+ if err != nil {
+ session.engine.logger.Error(err)
+ return 0, err
+ }
+ return string(bytes), nil
+ } else if col.SQLType.IsBlob() {
+ var bytes []byte
+ var err error
+ if (k == reflect.Slice) &&
+ (fieldValue.Type().Elem().Kind() == reflect.Uint8) { // []byte goes to the blob untouched
+ bytes = fieldValue.Bytes()
+ } else {
+ bytes, err = DefaultJSONHandler.Marshal(fieldValue.Interface())
+ if err != nil {
+ session.engine.logger.Error(err)
+ return 0, err
+ }
+ }
+ return bytes, nil
+ }
+ return nil, ErrUnSupportedType
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return int64(fieldValue.Uint()), nil
+ default:
+ return fieldValue.Interface(), nil
+ }
+}
diff --git a/vendor/github.com/go-xorm/xorm/session_delete.go b/vendor/github.com/go-xorm/xorm/session_delete.go
new file mode 100644
index 0000000..675d4d8
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/session_delete.go
@@ -0,0 +1,244 @@
+// Copyright 2016 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+
+ "xorm.io/core"
+)
+
+func (session *Session) cacheDelete(table *core.Table, tableName, sqlStr string, args ...interface{}) error { // invalidates cached beans/ids affected by a delete
+ if table == nil ||
+ session.tx != nil { // never use the cache inside a transaction
+ return ErrCacheFailed
+ }
+
+ for _, filter := range session.engine.dialect.Filters() {
+ sqlStr = filter.Do(sqlStr, session.engine.dialect, table)
+ }
+
+ newsql := session.statement.convertIDSQL(sqlStr) // rewrite DELETE into a SELECT of the primary key(s)
+ if newsql == "" {
+ return ErrCacheFailed
+ }
+
+ cacher := session.engine.getCacher(tableName) // NOTE(review): assumes a cacher is registered — the caller in Delete checks this; confirm
+ pkColumns := table.PKColumns()
+ ids, err := core.GetCacheSql(cacher, tableName, newsql, args)
+ if err != nil { // cache miss: fetch the affected ids from the database instead
+ resultsSlice, err := session.queryBytes(newsql, args...)
+ if err != nil {
+ return err
+ }
+ ids = make([]core.PK, 0)
+ if len(resultsSlice) > 0 {
+ for _, data := range resultsSlice {
+ var id int64
+ var pk core.PK = make([]interface{}, 0)
+ for _, col := range pkColumns {
+ if v, ok := data[col.Name]; !ok {
+ return errors.New("no id")
+ } else if col.SQLType.IsText() {
+ pk = append(pk, string(v))
+ } else if col.SQLType.IsNumeric() {
+ id, err = strconv.ParseInt(string(v), 10, 64)
+ if err != nil {
+ return err
+ }
+ pk = append(pk, id)
+ } else {
+ return errors.New("not supported primary key type")
+ }
+ }
+ ids = append(ids, pk)
+ }
+ }
+ }
+
+ for _, id := range ids {
+ session.engine.logger.Debug("[cacheDelete] delete cache obj:", tableName, id)
+ sid, err := id.ToString()
+ if err != nil {
+ return err
+ }
+ cacher.DelBean(tableName, sid)
+ }
+ session.engine.logger.Debug("[cacheDelete] clear cache table:", tableName)
+ cacher.ClearIds(tableName)
+ return nil
+}
+
+// Delete deletes records; the bean's non-empty fields are used as conditions.
+func (session *Session) Delete(bean interface{}) (int64, error) {
+ if session.isAutoClose {
+ defer session.Close()
+ }
+
+ if session.statement.lastError != nil {
+ return 0, session.statement.lastError
+ }
+
+ if err := session.statement.setRefBean(bean); err != nil {
+ return 0, err
+ }
+
+ // handle before delete processors
+ for _, closure := range session.beforeClosures {
+ closure(bean)
+ }
+ cleanupProcessorsClosures(&session.beforeClosures)
+
+ if processor, ok := interface{}(bean).(BeforeDeleteProcessor); ok {
+ processor.BeforeDelete()
+ }
+
+ condSQL, condArgs, err := session.statement.genConds(bean)
+ if err != nil {
+ return 0, err
+ }
+ if len(condSQL) == 0 && session.statement.LimitN == 0 { // refuse an unconditional full-table delete
+ return 0, ErrNeedDeletedCond
+ }
+
+ var tableNameNoQuote = session.statement.TableName()
+ var tableName = session.engine.Quote(tableNameNoQuote)
+ var table = session.statement.RefTable
+ var deleteSQL string
+ if len(condSQL) > 0 {
+ deleteSQL = fmt.Sprintf("DELETE FROM %v WHERE %v", tableName, condSQL)
+ } else {
+ deleteSQL = fmt.Sprintf("DELETE FROM %v", tableName)
+ }
+
+ var orderSQL string
+ if len(session.statement.OrderStr) > 0 {
+ orderSQL += fmt.Sprintf(" ORDER BY %s", session.statement.OrderStr)
+ }
+ if session.statement.LimitN > 0 {
+ orderSQL += fmt.Sprintf(" LIMIT %d", session.statement.LimitN)
+ }
+
+ if len(orderSQL) > 0 { // ORDER BY/LIMIT on DELETE needs per-dialect emulation via a subquery
+ switch session.engine.dialect.DBType() {
+ case core.POSTGRES:
+ inSQL := fmt.Sprintf("ctid IN (SELECT ctid FROM %s%s)", tableName, orderSQL)
+ if len(condSQL) > 0 {
+ deleteSQL += " AND " + inSQL
+ } else {
+ deleteSQL += " WHERE " + inSQL
+ }
+ case core.SQLITE:
+ inSQL := fmt.Sprintf("rowid IN (SELECT rowid FROM %s%s)", tableName, orderSQL)
+ if len(condSQL) > 0 {
+ deleteSQL += " AND " + inSQL
+ } else {
+ deleteSQL += " WHERE " + inSQL
+ }
+ // TODO: how to handle delete limit on mssql?
+ case core.MSSQL:
+ return 0, ErrNotImplemented
+ default:
+ deleteSQL += orderSQL
+ }
+ }
+
+ var realSQL string
+ argsForCache := make([]interface{}, 0, len(condArgs)*2)
+ if session.statement.unscoped || table.DeletedColumn() == nil { // tag "deleted" is disabled
+ realSQL = deleteSQL
+ copy(argsForCache, condArgs) // NOTE(review): copy into a zero-length slice copies nothing; argsForCache ends up == condArgs — confirm intended
+ argsForCache = append(condArgs, argsForCache...)
+ } else {
+ // !oinume! sqlStrForCache and argsForCache is needed to behave as executing "DELETE FROM ..." for cache.
+ copy(argsForCache, condArgs)
+ argsForCache = append(condArgs, argsForCache...)
+
+ deletedColumn := table.DeletedColumn()
+ realSQL = fmt.Sprintf("UPDATE %v SET %v = ? WHERE %v",
+ session.engine.Quote(session.statement.TableName()),
+ session.engine.Quote(deletedColumn.Name),
+ condSQL)
+
+ if len(orderSQL) > 0 {
+ switch session.engine.dialect.DBType() {
+ case core.POSTGRES:
+ inSQL := fmt.Sprintf("ctid IN (SELECT ctid FROM %s%s)", tableName, orderSQL)
+ if len(condSQL) > 0 {
+ realSQL += " AND " + inSQL
+ } else {
+ realSQL += " WHERE " + inSQL
+ }
+ case core.SQLITE:
+ inSQL := fmt.Sprintf("rowid IN (SELECT rowid FROM %s%s)", tableName, orderSQL)
+ if len(condSQL) > 0 {
+ realSQL += " AND " + inSQL
+ } else {
+ realSQL += " WHERE " + inSQL
+ }
+ // TODO: how to handle delete limit on mssql?
+ case core.MSSQL:
+ return 0, ErrNotImplemented
+ default:
+ realSQL += orderSQL
+ }
+ }
+
+ // !oinume! Insert nowTime to the head of session.statement.Params
+ condArgs = append(condArgs, "")
+ paramsLen := len(condArgs)
+ copy(condArgs[1:paramsLen], condArgs[0:paramsLen-1]) // shift right to make room at index 0
+
+ val, t := session.engine.nowTime(deletedColumn)
+ condArgs[0] = val
+
+ var colName = deletedColumn.Name
+ session.afterClosures = append(session.afterClosures, func(bean interface{}) {
+ col := table.GetColumn(colName)
+ setColumnTime(bean, col, t)
+ })
+ }
+
+ if cacher := session.engine.getCacher(tableNameNoQuote); cacher != nil && session.statement.UseCache {
+ session.cacheDelete(table, tableNameNoQuote, deleteSQL, argsForCache...) // NOTE(review): error ignored — cache invalidation is best-effort
+ }
+
+ session.statement.RefTable = table
+ res, err := session.exec(realSQL, condArgs...)
+ if err != nil {
+ return 0, err
+ }
+
+ // handle after delete processors
+ if session.isAutoCommit {
+ for _, closure := range session.afterClosures {
+ closure(bean)
+ }
+ if processor, ok := interface{}(bean).(AfterDeleteProcessor); ok {
+ processor.AfterDelete()
+ }
+ } else { // inside a transaction: defer the after-delete hooks until commit
+ lenAfterClosures := len(session.afterClosures)
+ if lenAfterClosures > 0 {
+ if value, has := session.afterDeleteBeans[bean]; has && value != nil {
+ *value = append(*value, session.afterClosures...)
+ } else {
+ afterClosures := make([]func(interface{}), lenAfterClosures)
+ copy(afterClosures, session.afterClosures)
+ session.afterDeleteBeans[bean] = &afterClosures
+ }
+ } else {
+ if _, ok := interface{}(bean).(AfterDeleteProcessor); ok {
+ session.afterDeleteBeans[bean] = nil
+ }
+ }
+ }
+ cleanupProcessorsClosures(&session.afterClosures)
+ // --
+
+ return res.RowsAffected()
+}
diff --git a/vendor/github.com/go-xorm/xorm/session_exist.go b/vendor/github.com/go-xorm/xorm/session_exist.go
new file mode 100644
index 0000000..660cc47
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/session_exist.go
@@ -0,0 +1,96 @@
+// Copyright 2017 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "xorm.io/builder"
+ "xorm.io/core"
+)
+
+// Exist returns true if a matching record exists, otherwise false.
+func (session *Session) Exist(bean ...interface{}) (bool, error) {
+ if session.isAutoClose {
+ defer session.Close()
+ }
+
+ if session.statement.lastError != nil {
+ return false, session.statement.lastError
+ }
+
+ var sqlStr string
+ var args []interface{}
+ var err error
+
+ if session.statement.RawSQL == "" {
+ if len(bean) == 0 { // no bean: build the query from the statement's conditions only
+ tableName := session.statement.TableName()
+ if len(tableName) <= 0 {
+ return false, ErrTableNotFound
+ }
+
+ tableName = session.statement.Engine.Quote(tableName)
+
+ if session.statement.cond.IsValid() {
+ condSQL, condArgs, err := builder.ToSQL(session.statement.cond)
+ if err != nil {
+ return false, err
+ }
+
+ if session.engine.dialect.DBType() == core.MSSQL { // dialect-specific "first row only" syntax
+ sqlStr = fmt.Sprintf("SELECT TOP 1 * FROM %s WHERE %s", tableName, condSQL)
+ } else if session.engine.dialect.DBType() == core.ORACLE {
+ sqlStr = fmt.Sprintf("SELECT * FROM %s WHERE (%s) AND ROWNUM=1", tableName, condSQL)
+ } else {
+ sqlStr = fmt.Sprintf("SELECT * FROM %s WHERE %s LIMIT 1", tableName, condSQL)
+ }
+ args = condArgs
+ } else {
+ if session.engine.dialect.DBType() == core.MSSQL {
+ sqlStr = fmt.Sprintf("SELECT TOP 1 * FROM %s", tableName)
+ } else if session.engine.dialect.DBType() == core.ORACLE {
+ sqlStr = fmt.Sprintf("SELECT * FROM %s WHERE ROWNUM=1", tableName)
+ } else {
+ sqlStr = fmt.Sprintf("SELECT * FROM %s LIMIT 1", tableName)
+ }
+ args = []interface{}{}
+ }
+ } else { // with a bean: its non-empty fields become the conditions
+ beanValue := reflect.ValueOf(bean[0])
+ if beanValue.Kind() != reflect.Ptr {
+ return false, errors.New("needs a pointer")
+ }
+
+ if beanValue.Elem().Kind() == reflect.Struct {
+ if err := session.statement.setRefBean(bean[0]); err != nil {
+ return false, err
+ }
+ }
+
+ if len(session.statement.TableName()) <= 0 {
+ return false, ErrTableNotFound
+ }
+ session.statement.Limit(1)
+ sqlStr, args, err = session.statement.genGetSQL(bean[0])
+ if err != nil {
+ return false, err
+ }
+ }
+ } else {
+ sqlStr = session.statement.RawSQL
+ args = session.statement.RawParams
+ }
+
+ rows, err := session.queryRows(sqlStr, args...)
+ if err != nil {
+ return false, err
+ }
+ defer rows.Close()
+
+ return rows.Next(), nil // the record exists iff at least one row comes back
+}
diff --git a/vendor/github.com/go-xorm/xorm/session_find.go b/vendor/github.com/go-xorm/xorm/session_find.go
new file mode 100644
index 0000000..6b8aa46
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/session_find.go
@@ -0,0 +1,505 @@
+// Copyright 2016 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+
+ "xorm.io/builder"
+ "xorm.io/core"
+)
+
+const (
+ tpStruct = iota
+ tpNonStruct
+)
+
+// Find retrieve records from table, condiBeans's non-empty fields
+// are conditions. beans could be []Struct, []*Struct, map[int64]Struct
+// map[int64]*Struct
+func (session *Session) Find(rowsSlicePtr interface{}, condiBean ...interface{}) error {
+ // Close the session on return when it was opened in auto-close mode.
+ if session.isAutoClose {
+ defer session.Close()
+ }
+ return session.find(rowsSlicePtr, condiBean...)
+}
+
+// FindAndCount find the results and also return the counts.
+// It runs find() first with statement auto-reset disabled so the same
+// conditions can be reused for the subsequent Count() query.
+func (session *Session) FindAndCount(rowsSlicePtr interface{}, condiBean ...interface{}) (int64, error) {
+ if session.isAutoClose {
+ defer session.Close()
+ }
+
+ // Keep the statement alive across find() so Count() sees the same conditions.
+ session.autoResetStatement = false
+ err := session.find(rowsSlicePtr, condiBean...)
+ if err != nil {
+ return 0, err
+ }
+
+ sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr))
+ if sliceValue.Kind() != reflect.Slice && sliceValue.Kind() != reflect.Map {
+ return 0, errors.New("needs a pointer to a slice or a map")
+ }
+
+ // Unwrap a pointer element type so Count gets a plain struct value.
+ sliceElementType := sliceValue.Type().Elem()
+ if sliceElementType.Kind() == reflect.Ptr {
+ sliceElementType = sliceElementType.Elem()
+ }
+ session.autoResetStatement = true
+
+ // SELECT list and ORDER BY are irrelevant (and may be invalid) for COUNT.
+ if session.statement.selectStr != "" {
+ session.statement.selectStr = ""
+ }
+ if session.statement.OrderStr != "" {
+ session.statement.OrderStr = ""
+ }
+
+ return session.Count(reflect.New(sliceElementType).Interface())
+}
+
+// find is the shared implementation behind Find/FindAndCount: it resolves the
+// element type of the destination slice/map, builds the SQL (or uses RawSQL),
+// optionally consults the query cache, and fills the container.
+func (session *Session) find(rowsSlicePtr interface{}, condiBean ...interface{}) error {
+ defer session.resetStatement()
+
+ if session.statement.lastError != nil {
+ return session.statement.lastError
+ }
+
+ sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr))
+ if sliceValue.Kind() != reflect.Slice && sliceValue.Kind() != reflect.Map {
+ return errors.New("needs a pointer to a slice or a map")
+ }
+
+ sliceElementType := sliceValue.Type().Elem()
+
+ // Decide whether elements are structs (mapped to a table) or scalars/slices/maps.
+ var tp = tpStruct
+ if session.statement.RefTable == nil {
+ if sliceElementType.Kind() == reflect.Ptr {
+ if sliceElementType.Elem().Kind() == reflect.Struct {
+ pv := reflect.New(sliceElementType.Elem())
+ if err := session.statement.setRefValue(pv); err != nil {
+ return err
+ }
+ } else {
+ tp = tpNonStruct
+ }
+ } else if sliceElementType.Kind() == reflect.Struct {
+ pv := reflect.New(sliceElementType)
+ if err := session.statement.setRefValue(pv); err != nil {
+ return err
+ }
+ } else {
+ tp = tpNonStruct
+ }
+ }
+
+ var table = session.statement.RefTable
+
+ var addedTableName = (len(session.statement.JoinStr) > 0)
+ var autoCond builder.Cond
+ if tp == tpStruct {
+ if !session.statement.noAutoCondition && len(condiBean) > 0 {
+ var err error
+ autoCond, err = session.statement.buildConds(table, condiBean[0], true, true, false, true, addedTableName)
+ if err != nil {
+ return err
+ }
+ } else {
+ // !oinume! Add "<col> IS NULL" to WHERE whatever condiBean is given.
+ // See https://github.com/go-xorm/xorm/issues/179
+ if col := table.DeletedColumn(); col != nil && !session.statement.unscoped { // tag "deleted" is enabled
+ var colName = session.engine.Quote(col.Name)
+ if addedTableName {
+ var nm = session.statement.TableName()
+ if len(session.statement.TableAlias) > 0 {
+ nm = session.statement.TableAlias
+ }
+ colName = session.engine.Quote(nm) + "." + colName
+ }
+
+ autoCond = session.engine.CondDeleted(colName)
+ }
+ }
+ }
+
+ var sqlStr string
+ var args []interface{}
+ var err error
+ if session.statement.RawSQL == "" {
+ if len(session.statement.TableName()) <= 0 {
+ return ErrTableNotFound
+ }
+
+ // Work out the SELECT column list: explicit selectStr wins, then
+ // ColumnStr, then GROUP BY columns, then the generated/star default.
+ var columnStr = session.statement.ColumnStr
+ if len(session.statement.selectStr) > 0 {
+ columnStr = session.statement.selectStr
+ } else {
+ if session.statement.JoinStr == "" {
+ if columnStr == "" {
+ if session.statement.GroupByStr != "" {
+ columnStr = session.engine.quoteColumns(session.statement.GroupByStr)
+ } else {
+ columnStr = session.statement.genColumnStr()
+ }
+ }
+ } else {
+ if columnStr == "" {
+ if session.statement.GroupByStr != "" {
+ columnStr = session.engine.quoteColumns(session.statement.GroupByStr)
+ } else {
+ columnStr = "*"
+ }
+ }
+ }
+ if columnStr == "" {
+ columnStr = "*"
+ }
+ }
+
+ session.statement.cond = session.statement.cond.And(autoCond)
+ condSQL, condArgs, err := builder.ToSQL(session.statement.cond)
+ if err != nil {
+ return err
+ }
+
+ args = append(session.statement.joinArgs, condArgs...)
+ sqlStr, err = session.statement.genSelectSQL(columnStr, condSQL, true, true)
+ if err != nil {
+ return err
+ }
+ // for mssql and use limit
+ // (the generated SQL may repeat the WHERE placeholders, so double the args)
+ qs := strings.Count(sqlStr, "?")
+ if len(args)*2 == qs {
+ args = append(args, args...)
+ }
+ } else {
+ sqlStr = session.statement.RawSQL
+ args = session.statement.RawParams
+ }
+
+ // Try the per-table cache first; fall through to a direct query on failure.
+ if session.canCache() {
+ if cacher := session.engine.getCacher(session.statement.TableName()); cacher != nil &&
+ !session.statement.IsDistinct &&
+ !session.statement.unscoped {
+ err = session.cacheFind(sliceElementType, sqlStr, rowsSlicePtr, args...)
+ if err != ErrCacheFailed {
+ return err
+ }
+ err = nil // !nashtsai! reset err to nil for ErrCacheFailed
+ session.engine.logger.Warn("Cache Find Failed")
+ }
+ }
+
+ return session.noCacheFind(table, sliceValue, sqlStr, args...)
+}
+
+// noCacheFind executes sqlStr directly and scans every row into
+// containerValue (a slice or a map). Struct elements go through the
+// rows2Beans mapping path; scalar/slice/map elements are scanned per row.
+func (session *Session) noCacheFind(table *core.Table, containerValue reflect.Value, sqlStr string, args ...interface{}) error {
+ rows, err := session.queryRows(sqlStr, args...)
+ if err != nil {
+ return err
+ }
+ defer rows.Close()
+
+ fields, err := rows.Columns()
+ if err != nil {
+ return err
+ }
+
+ var newElemFunc func(fields []string) reflect.Value
+ elemType := containerValue.Type().Elem()
+ var isPointer bool
+ if elemType.Kind() == reflect.Ptr {
+ isPointer = true
+ elemType = elemType.Elem()
+ }
+ if elemType.Kind() == reflect.Ptr {
+ return errors.New("pointer to pointer is not supported")
+ }
+
+ // Allocates one fresh element (pointer to slice/map/struct/scalar) per row.
+ newElemFunc = func(fields []string) reflect.Value {
+ switch elemType.Kind() {
+ case reflect.Slice:
+ slice := reflect.MakeSlice(elemType, len(fields), len(fields))
+ x := reflect.New(slice.Type())
+ x.Elem().Set(slice)
+ return x
+ case reflect.Map:
+ mp := reflect.MakeMap(elemType)
+ x := reflect.New(mp.Type())
+ x.Elem().Set(mp)
+ return x
+ }
+ return reflect.New(elemType)
+ }
+
+ var containerValueSetFunc func(*reflect.Value, core.PK) error
+
+ if containerValue.Kind() == reflect.Slice {
+ // Slice destination: append (as pointer or value depending on elem type).
+ containerValueSetFunc = func(newValue *reflect.Value, pk core.PK) error {
+ if isPointer {
+ containerValue.Set(reflect.Append(containerValue, newValue.Elem().Addr()))
+ } else {
+ containerValue.Set(reflect.Append(containerValue, newValue.Elem()))
+ }
+ return nil
+ }
+ } else {
+ // Map destination: the key is derived from the row's primary key.
+ keyType := containerValue.Type().Key()
+ if len(table.PrimaryKeys) == 0 {
+ return errors.New("don't support multiple primary key's map has non-slice key type")
+ }
+ if len(table.PrimaryKeys) > 1 && keyType.Kind() != reflect.Slice {
+ return errors.New("don't support multiple primary key's map has non-slice key type")
+ }
+
+ containerValueSetFunc = func(newValue *reflect.Value, pk core.PK) error {
+ keyValue := reflect.New(keyType)
+ err := convertPKToValue(table, keyValue.Interface(), pk)
+ if err != nil {
+ return err
+ }
+ if isPointer {
+ containerValue.SetMapIndex(keyValue.Elem(), newValue.Elem().Addr())
+ } else {
+ containerValue.SetMapIndex(keyValue.Elem(), newValue.Elem())
+ }
+ return nil
+ }
+ }
+
+ if elemType.Kind() == reflect.Struct {
+ var newValue = newElemFunc(fields)
+ dataStruct := rValue(newValue.Interface())
+ tb, err := session.engine.autoMapType(dataStruct)
+ if err != nil {
+ return err
+ }
+ err = session.rows2Beans(rows, fields, tb, newElemFunc, containerValueSetFunc)
+ rows.Close()
+ if err != nil {
+ return err
+ }
+ return session.executeProcessors()
+ }
+
+ // Non-struct elements: scan each row straight into the element.
+ for rows.Next() {
+ var newValue = newElemFunc(fields)
+ bean := newValue.Interface()
+
+ switch elemType.Kind() {
+ case reflect.Slice:
+ err = rows.ScanSlice(bean)
+ case reflect.Map:
+ err = rows.ScanMap(bean)
+ default:
+ err = rows.Scan(bean)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ if err := containerValueSetFunc(&newValue, nil); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// convertPKToValue converts a primary-key tuple into dst. Only the
+// single-column case performs a real conversion via convertAssign.
+func convertPKToValue(table *core.Table, dst interface{}, pk core.PK) error {
+ cols := table.PKColumns()
+ if len(cols) == 1 {
+ return convertAssign(dst, pk[0])
+ }
+
+ // NOTE(review): this only rebinds the local parameter; the caller's
+ // value is NOT updated for composite keys — likely a latent upstream bug,
+ // left as-is because this file is vendored.
+ dst = pk
+ return nil
+}
+
+// cacheFind serves a Find from the table cache: it rewrites the query to an
+// id-only SQL, resolves the id list (from cache or the database), fetches any
+// beans missing from the cache in one extra query, and finally assembles the
+// result slice/map in the original row order. Returns ErrCacheFailed when the
+// query cannot be cached (HAVING/GROUP BY, too many ids, inconsistent cache).
+func (session *Session) cacheFind(t reflect.Type, sqlStr string, rowsSlicePtr interface{}, args ...interface{}) (err error) {
+ if !session.canCache() ||
+ indexNoCase(sqlStr, "having") != -1 ||
+ indexNoCase(sqlStr, "group by") != -1 {
+ return ErrCacheFailed
+ }
+
+ tableName := session.statement.TableName()
+ cacher := session.engine.getCacher(tableName)
+ if cacher == nil {
+ return nil
+ }
+
+ for _, filter := range session.engine.dialect.Filters() {
+ sqlStr = filter.Do(sqlStr, session.engine.dialect, session.statement.RefTable)
+ }
+
+ // Rewrite the SELECT into an id-only query usable as a cache key.
+ newsql := session.statement.convertIDSQL(sqlStr)
+ if newsql == "" {
+ return ErrCacheFailed
+ }
+
+ table := session.statement.RefTable
+ ids, err := core.GetCacheSql(cacher, tableName, newsql, args)
+ if err != nil {
+ // Cache miss for the id list: run the id query and populate the cache.
+ rows, err := session.queryRows(newsql, args...)
+ if err != nil {
+ return err
+ }
+ defer rows.Close()
+
+ var i int
+ ids = make([]core.PK, 0)
+ for rows.Next() {
+ i++
+ if i > 500 {
+ session.engine.logger.Debug("[cacheFind] ids length > 500, no cache")
+ return ErrCacheFailed
+ }
+ var res = make([]string, len(table.PrimaryKeys))
+ err = rows.ScanSlice(&res)
+ if err != nil {
+ return err
+ }
+ var pk core.PK = make([]interface{}, len(table.PrimaryKeys))
+ for i, col := range table.PKColumns() {
+ pk[i], err = session.engine.idTypeAssertion(col, res[i])
+ if err != nil {
+ return err
+ }
+ }
+
+ ids = append(ids, pk)
+ }
+
+ session.engine.logger.Debug("[cacheFind] cache sql:", ids, tableName, sqlStr, newsql, args)
+ err = core.PutCacheSql(cacher, ids, tableName, newsql, args)
+ if err != nil {
+ return err
+ }
+ } else {
+ session.engine.logger.Debug("[cacheFind] cache hit sql:", tableName, sqlStr, newsql, args)
+ }
+
+ sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr))
+
+ // temps holds beans at their original row positions; ides collects the
+ // ids whose beans are not cached yet (ididxes maps id string -> position).
+ ididxes := make(map[string]int)
+ var ides []core.PK
+ var temps = make([]interface{}, len(ids))
+
+ for idx, id := range ids {
+ sid, err := id.ToString()
+ if err != nil {
+ return err
+ }
+ bean := cacher.GetBean(tableName, sid)
+ if bean == nil || reflect.ValueOf(bean).Elem().Type() != t {
+ ides = append(ides, id)
+ ididxes[sid] = idx
+ } else {
+ session.engine.logger.Debug("[cacheFind] cache hit bean:", tableName, id, bean)
+
+ pk := session.engine.IdOf(bean)
+ xid, err := pk.ToString()
+ if err != nil {
+ return err
+ }
+
+ // A cached bean whose own pk disagrees with its cache key is corrupt.
+ if sid != xid {
+ session.engine.logger.Error("[cacheFind] error cache", xid, sid, bean)
+ return ErrCacheFailed
+ }
+ temps[idx] = bean
+ }
+ }
+
+ if len(ides) > 0 {
+ // Fetch all missing beans in one IN/OR query and cache them.
+ slices := reflect.New(reflect.SliceOf(t))
+ beans := slices.Interface()
+
+ if len(table.PrimaryKeys) == 1 {
+ ff := make([]interface{}, 0, len(ides))
+ for _, ie := range ides {
+ ff = append(ff, ie[0])
+ }
+
+ session.In("`"+table.PrimaryKeys[0]+"`", ff...)
+ } else {
+ for _, ie := range ides {
+ cond := builder.NewCond()
+ for i, name := range table.PrimaryKeys {
+ cond = cond.And(builder.Eq{"`" + name + "`": ie[i]})
+ }
+ session.Or(cond)
+ }
+ }
+
+ err = session.NoCache().Table(tableName).find(beans)
+ if err != nil {
+ return err
+ }
+
+ vs := reflect.Indirect(reflect.ValueOf(beans))
+ for i := 0; i < vs.Len(); i++ {
+ rv := vs.Index(i)
+ if rv.Kind() != reflect.Ptr {
+ rv = rv.Addr()
+ }
+ id, err := session.engine.idOfV(rv)
+ if err != nil {
+ return err
+ }
+ sid, err := id.ToString()
+ if err != nil {
+ return err
+ }
+
+ bean := rv.Interface()
+ temps[ididxes[sid]] = bean
+ session.engine.logger.Debug("[cacheFind] cache bean:", tableName, id, bean, temps)
+ cacher.PutBean(tableName, sid, bean)
+ }
+ }
+
+ // Assemble the caller's slice/map in the original id order.
+ for j := 0; j < len(temps); j++ {
+ bean := temps[j]
+ if bean == nil {
+ session.engine.logger.Warn("[cacheFind] cache no hit:", tableName, ids[j], temps)
+ // return errors.New("cache error") // !nashtsai! no need to return error, but continue instead
+ continue
+ }
+ if sliceValue.Kind() == reflect.Slice {
+ if t.Kind() == reflect.Ptr {
+ sliceValue.Set(reflect.Append(sliceValue, reflect.ValueOf(bean)))
+ } else {
+ sliceValue.Set(reflect.Append(sliceValue, reflect.Indirect(reflect.ValueOf(bean))))
+ }
+ } else if sliceValue.Kind() == reflect.Map {
+ var key = ids[j]
+ keyType := sliceValue.Type().Key()
+ var ikey interface{}
+ if len(key) == 1 {
+ ikey, err = str2PK(fmt.Sprintf("%v", key[0]), keyType)
+ if err != nil {
+ return err
+ }
+ } else {
+ if keyType.Kind() != reflect.Slice {
+ return errors.New("table have multiple primary keys, key is not core.PK or slice")
+ }
+ ikey = key
+ }
+
+ if t.Kind() == reflect.Ptr {
+ sliceValue.SetMapIndex(reflect.ValueOf(ikey), reflect.ValueOf(bean))
+ } else {
+ sliceValue.SetMapIndex(reflect.ValueOf(ikey), reflect.Indirect(reflect.ValueOf(bean)))
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/go-xorm/xorm/session_get.go b/vendor/github.com/go-xorm/xorm/session_get.go
new file mode 100644
index 0000000..cc0a201
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/session_get.go
@@ -0,0 +1,356 @@
+// Copyright 2016 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "database/sql"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+
+ "xorm.io/core"
+)
+
+// Get retrieve one record from database, bean's non-empty fields
+// will be as conditions
+func (session *Session) Get(bean interface{}) (bool, error) {
+ // Close the session on return when it was opened in auto-close mode.
+ if session.isAutoClose {
+ defer session.Close()
+ }
+ return session.get(bean)
+}
+
+// get implements Get: it validates the destination pointer, builds the
+// LIMIT-1 SQL (or uses RawSQL), consults the bean cache and the per-statement
+// context cache, and finally scans the single row. Returns (found, error).
+func (session *Session) get(bean interface{}) (bool, error) {
+ defer session.resetStatement()
+
+ if session.statement.lastError != nil {
+ return false, session.statement.lastError
+ }
+
+ beanValue := reflect.ValueOf(bean)
+ if beanValue.Kind() != reflect.Ptr {
+ return false, errors.New("needs a pointer to a value")
+ } else if beanValue.Elem().Kind() == reflect.Ptr {
+ return false, errors.New("a pointer to a pointer is not allowed")
+ }
+
+ if beanValue.Elem().Kind() == reflect.Struct {
+ if err := session.statement.setRefBean(bean); err != nil {
+ return false, err
+ }
+ }
+
+ var sqlStr string
+ var args []interface{}
+ var err error
+
+ if session.statement.RawSQL == "" {
+ if len(session.statement.TableName()) <= 0 {
+ return false, ErrTableNotFound
+ }
+ session.statement.Limit(1)
+ sqlStr, args, err = session.statement.genGetSQL(bean)
+ if err != nil {
+ return false, err
+ }
+ } else {
+ sqlStr = session.statement.RawSQL
+ args = session.statement.RawParams
+ }
+
+ table := session.statement.RefTable
+
+ // Bean cache: only for struct destinations and non-unscoped statements.
+ if session.canCache() && beanValue.Elem().Kind() == reflect.Struct {
+ if cacher := session.engine.getCacher(session.statement.TableName()); cacher != nil &&
+ !session.statement.unscoped {
+ has, err := session.cacheGet(bean, sqlStr, args...)
+ if err != ErrCacheFailed {
+ return has, err
+ }
+ }
+ }
+
+ // Per-statement context cache, keyed by "sql-args".
+ context := session.statement.context
+ if context != nil {
+ res := context.Get(fmt.Sprintf("%v-%v", sqlStr, args))
+ if res != nil {
+ session.engine.logger.Debug("hit context cache", sqlStr)
+
+ structValue := reflect.Indirect(reflect.ValueOf(bean))
+ structValue.Set(reflect.Indirect(reflect.ValueOf(res)))
+ session.lastSQL = ""
+ session.lastSQLArgs = nil
+ return true, nil
+ }
+ }
+
+ has, err := session.nocacheGet(beanValue.Elem().Kind(), table, bean, sqlStr, args...)
+ if err != nil || !has {
+ return has, err
+ }
+
+ if context != nil {
+ context.Put(fmt.Sprintf("%v-%v", sqlStr, args), bean)
+ }
+
+ return true, nil
+}
+
+// nocacheGet runs sqlStr and scans at most one row into bean, bypassing all
+// caches. sql.Null* and primitive-pointer beans are scanned via Null* wrappers
+// so that SQL NULL leaves the destination untouched; struct beans go through
+// the row2Slice/slice2Bean mapping path. Returns (found, error).
+func (session *Session) nocacheGet(beanKind reflect.Kind, table *core.Table, bean interface{}, sqlStr string, args ...interface{}) (bool, error) {
+ rows, err := session.queryRows(sqlStr, args...)
+ if err != nil {
+ return false, err
+ }
+ defer rows.Close()
+
+ if !rows.Next() {
+ if rows.Err() != nil {
+ return false, rows.Err()
+ }
+ return false, nil
+ }
+
+ // Fast paths for well-known destination types; each scans through a
+ // Null* wrapper and only assigns when the column value is non-NULL.
+ switch bean.(type) {
+ case sql.NullInt64, sql.NullBool, sql.NullFloat64, sql.NullString:
+ return true, rows.Scan(&bean)
+ case *sql.NullInt64, *sql.NullBool, *sql.NullFloat64, *sql.NullString:
+ return true, rows.Scan(bean)
+ case *string:
+ var res sql.NullString
+ if err := rows.Scan(&res); err != nil {
+ return true, err
+ }
+ if res.Valid {
+ *(bean.(*string)) = res.String
+ }
+ return true, nil
+ case *int:
+ var res sql.NullInt64
+ if err := rows.Scan(&res); err != nil {
+ return true, err
+ }
+ if res.Valid {
+ *(bean.(*int)) = int(res.Int64)
+ }
+ return true, nil
+ case *int8:
+ var res sql.NullInt64
+ if err := rows.Scan(&res); err != nil {
+ return true, err
+ }
+ if res.Valid {
+ *(bean.(*int8)) = int8(res.Int64)
+ }
+ return true, nil
+ case *int16:
+ var res sql.NullInt64
+ if err := rows.Scan(&res); err != nil {
+ return true, err
+ }
+ if res.Valid {
+ *(bean.(*int16)) = int16(res.Int64)
+ }
+ return true, nil
+ case *int32:
+ var res sql.NullInt64
+ if err := rows.Scan(&res); err != nil {
+ return true, err
+ }
+ if res.Valid {
+ *(bean.(*int32)) = int32(res.Int64)
+ }
+ return true, nil
+ case *int64:
+ var res sql.NullInt64
+ if err := rows.Scan(&res); err != nil {
+ return true, err
+ }
+ if res.Valid {
+ *(bean.(*int64)) = int64(res.Int64)
+ }
+ return true, nil
+ case *uint:
+ var res sql.NullInt64
+ if err := rows.Scan(&res); err != nil {
+ return true, err
+ }
+ if res.Valid {
+ *(bean.(*uint)) = uint(res.Int64)
+ }
+ return true, nil
+ case *uint8:
+ var res sql.NullInt64
+ if err := rows.Scan(&res); err != nil {
+ return true, err
+ }
+ if res.Valid {
+ *(bean.(*uint8)) = uint8(res.Int64)
+ }
+ return true, nil
+ case *uint16:
+ var res sql.NullInt64
+ if err := rows.Scan(&res); err != nil {
+ return true, err
+ }
+ if res.Valid {
+ *(bean.(*uint16)) = uint16(res.Int64)
+ }
+ return true, nil
+ case *uint32:
+ var res sql.NullInt64
+ if err := rows.Scan(&res); err != nil {
+ return true, err
+ }
+ if res.Valid {
+ *(bean.(*uint32)) = uint32(res.Int64)
+ }
+ return true, nil
+ case *uint64:
+ var res sql.NullInt64
+ if err := rows.Scan(&res); err != nil {
+ return true, err
+ }
+ if res.Valid {
+ *(bean.(*uint64)) = uint64(res.Int64)
+ }
+ return true, nil
+ case *bool:
+ var res sql.NullBool
+ if err := rows.Scan(&res); err != nil {
+ return true, err
+ }
+ if res.Valid {
+ *(bean.(*bool)) = res.Bool
+ }
+ return true, nil
+ }
+
+ switch beanKind {
+ case reflect.Struct:
+ fields, err := rows.Columns()
+ if err != nil {
+ // WARN: Although rows returned true, fetching the column list failed
+ return true, err
+ }
+
+ scanResults, err := session.row2Slice(rows, fields, bean)
+ if err != nil {
+ return false, err
+ }
+ // close it before convert data
+ rows.Close()
+
+ dataStruct := rValue(bean)
+ _, err = session.slice2Bean(scanResults, fields, bean, &dataStruct, table)
+ if err != nil {
+ return true, err
+ }
+
+ return true, session.executeProcessors()
+ case reflect.Slice:
+ err = rows.ScanSlice(bean)
+ case reflect.Map:
+ err = rows.ScanMap(bean)
+ case reflect.String, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ err = rows.Scan(bean)
+ default:
+ err = rows.Scan(bean)
+ }
+
+ return true, err
+}
+
+// cacheGet serves a Get from the bean cache: it rewrites the query to an
+// id-only SQL, resolves the primary key (from cache or the database), then
+// either copies the cached bean into the destination or loads and caches it.
+// Returns ErrCacheFailed when the query cannot be cached.
+func (session *Session) cacheGet(bean interface{}, sqlStr string, args ...interface{}) (has bool, err error) {
+ // if has no reftable, then don't use cache currently
+ if !session.canCache() {
+ return false, ErrCacheFailed
+ }
+
+ for _, filter := range session.engine.dialect.Filters() {
+ sqlStr = filter.Do(sqlStr, session.engine.dialect, session.statement.RefTable)
+ }
+ newsql := session.statement.convertIDSQL(sqlStr)
+ if newsql == "" {
+ return false, ErrCacheFailed
+ }
+
+ tableName := session.statement.TableName()
+ cacher := session.engine.getCacher(tableName)
+
+ session.engine.logger.Debug("[cacheGet] find sql:", newsql, args)
+ table := session.statement.RefTable
+ ids, err := core.GetCacheSql(cacher, tableName, newsql, args)
+ if err != nil {
+ // Cache miss for the id: run the id-only query and cache the result.
+ var res = make([]string, len(table.PrimaryKeys))
+ rows, err := session.NoCache().queryRows(newsql, args...)
+ if err != nil {
+ return false, err
+ }
+ defer rows.Close()
+
+ if rows.Next() {
+ err = rows.ScanSlice(&res)
+ if err != nil {
+ return false, err
+ }
+ } else {
+ return false, ErrCacheFailed
+ }
+
+ // Convert the scanned string columns back into typed pk values.
+ var pk core.PK = make([]interface{}, len(table.PrimaryKeys))
+ for i, col := range table.PKColumns() {
+ if col.SQLType.IsText() {
+ pk[i] = res[i]
+ } else if col.SQLType.IsNumeric() {
+ n, err := strconv.ParseInt(res[i], 10, 64)
+ if err != nil {
+ return false, err
+ }
+ pk[i] = n
+ } else {
+ return false, errors.New("unsupported")
+ }
+ }
+
+ ids = []core.PK{pk}
+ session.engine.logger.Debug("[cacheGet] cache ids:", newsql, ids)
+ err = core.PutCacheSql(cacher, ids, tableName, newsql, args)
+ if err != nil {
+ return false, err
+ }
+ } else {
+ session.engine.logger.Debug("[cacheGet] cache hit sql:", newsql, ids)
+ }
+
+ if len(ids) > 0 {
+ structValue := reflect.Indirect(reflect.ValueOf(bean))
+ id := ids[0]
+ session.engine.logger.Debug("[cacheGet] get bean:", tableName, id)
+ sid, err := id.ToString()
+ if err != nil {
+ return false, err
+ }
+ cacheBean := cacher.GetBean(tableName, sid)
+ if cacheBean == nil {
+ // Bean not cached yet: load it via the non-cached path and store it.
+ cacheBean = bean
+ has, err = session.nocacheGet(reflect.Struct, table, cacheBean, sqlStr, args...)
+ if err != nil || !has {
+ return has, err
+ }
+
+ session.engine.logger.Debug("[cacheGet] cache bean:", tableName, id, cacheBean)
+ cacher.PutBean(tableName, sid, cacheBean)
+ } else {
+ session.engine.logger.Debug("[cacheGet] cache hit bean:", tableName, id, cacheBean)
+ has = true
+ }
+ structValue.Set(reflect.Indirect(reflect.ValueOf(cacheBean)))
+
+ return has, nil
+ }
+ return false, nil
+}
diff --git a/vendor/github.com/go-xorm/xorm/session_insert.go b/vendor/github.com/go-xorm/xorm/session_insert.go
new file mode 100644
index 0000000..7135656
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/session_insert.go
@@ -0,0 +1,739 @@
+// Copyright 2016 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "xorm.io/core"
+)
+
+// Insert insert one or more beans.
+// Each bean may be a map, a slice of maps, a struct, or a slice of structs;
+// slices of structs use multi-row INSERT when the dialect supports it.
+// Returns the total number of affected rows across all beans.
+func (session *Session) Insert(beans ...interface{}) (int64, error) {
+ var affected int64
+ var err error
+
+ if session.isAutoClose {
+ defer session.Close()
+ }
+
+ for _, bean := range beans {
+ switch bean.(type) {
+ case map[string]interface{}:
+ cnt, err := session.insertMapInterface(bean.(map[string]interface{}))
+ if err != nil {
+ return affected, err
+ }
+ affected += cnt
+ case []map[string]interface{}:
+ s := bean.([]map[string]interface{})
+ // Keep the statement across the per-map inserts of this batch.
+ // NOTE(review): autoResetStatement is not restored in this function.
+ session.autoResetStatement = false
+ for i := 0; i < len(s); i++ {
+ cnt, err := session.insertMapInterface(s[i])
+ if err != nil {
+ return affected, err
+ }
+ affected += cnt
+ }
+ case map[string]string:
+ cnt, err := session.insertMapString(bean.(map[string]string))
+ if err != nil {
+ return affected, err
+ }
+ affected += cnt
+ case []map[string]string:
+ s := bean.([]map[string]string)
+ session.autoResetStatement = false
+ for i := 0; i < len(s); i++ {
+ cnt, err := session.insertMapString(s[i])
+ if err != nil {
+ return affected, err
+ }
+ affected += cnt
+ }
+ default:
+ sliceValue := reflect.Indirect(reflect.ValueOf(bean))
+ if sliceValue.Kind() == reflect.Slice {
+ size := sliceValue.Len()
+ if size > 0 {
+ if session.engine.SupportInsertMany() {
+ // One multi-row INSERT for the whole slice.
+ cnt, err := session.innerInsertMulti(bean)
+ if err != nil {
+ return affected, err
+ }
+ affected += cnt
+ } else {
+ // Dialect cannot do multi-row INSERT: fall back to row-by-row.
+ for i := 0; i < size; i++ {
+ cnt, err := session.innerInsert(sliceValue.Index(i).Interface())
+ if err != nil {
+ return affected, err
+ }
+ affected += cnt
+ }
+ }
+ }
+ } else {
+ cnt, err := session.innerInsert(bean)
+ if err != nil {
+ return affected, err
+ }
+ affected += cnt
+ }
+ }
+ }
+
+ return affected, err
+}
+
+// innerInsertMulti inserts a slice of structs with a single multi-row INSERT
+// statement (Oracle uses INSERT ALL). The column set is decided from the
+// first element; subsequent elements contribute only value placeholders.
+// Before/After insert processors are honored per element.
+func (session *Session) innerInsertMulti(rowsSlicePtr interface{}) (int64, error) {
+ sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr))
+ if sliceValue.Kind() != reflect.Slice {
+ return 0, errors.New("needs a pointer to a slice")
+ }
+
+ if sliceValue.Len() <= 0 {
+ return 0, errors.New("could not insert a empty slice")
+ }
+
+ if err := session.statement.setRefBean(sliceValue.Index(0).Interface()); err != nil {
+ return 0, err
+ }
+
+ tableName := session.statement.TableName()
+ if len(tableName) <= 0 {
+ return 0, ErrTableNotFound
+ }
+
+ table := session.statement.RefTable
+ size := sliceValue.Len()
+
+ var colNames []string
+ var colMultiPlaces []string
+ var args []interface{}
+ var cols []*core.Column
+
+ for i := 0; i < size; i++ {
+ v := sliceValue.Index(i)
+ vv := reflect.Indirect(v)
+ elemValue := v.Interface()
+ var colPlaces []string
+
+ // handle BeforeInsertProcessor
+ // !nashtsai! does user expect it's same slice to passed closure when using Before()/After() when insert multi??
+ for _, closure := range session.beforeClosures {
+ closure(elemValue)
+ }
+
+ if processor, ok := interface{}(elemValue).(BeforeInsertProcessor); ok {
+ processor.BeforeInsert()
+ }
+ // --
+
+ if i == 0 {
+ // First element fixes the column list (colNames/cols) for all rows.
+ for _, col := range table.Columns() {
+ ptrFieldValue, err := col.ValueOfV(&vv)
+ if err != nil {
+ return 0, err
+ }
+ fieldValue := *ptrFieldValue
+ if col.IsAutoIncrement && isZero(fieldValue.Interface()) {
+ continue
+ }
+ if col.MapType == core.ONLYFROMDB {
+ continue
+ }
+ if col.IsDeleted {
+ continue
+ }
+ if session.statement.omitColumnMap.contain(col.Name) {
+ continue
+ }
+ if len(session.statement.columnMap) > 0 && !session.statement.columnMap.contain(col.Name) {
+ continue
+ }
+ if (col.IsCreated || col.IsUpdated) && session.statement.UseAutoTime {
+ val, t := session.engine.nowTime(col)
+ args = append(args, val)
+
+ // Write the generated timestamp back to the bean after exec.
+ var colName = col.Name
+ session.afterClosures = append(session.afterClosures, func(bean interface{}) {
+ col := table.GetColumn(colName)
+ setColumnTime(bean, col, t)
+ })
+ } else if col.IsVersion && session.statement.checkVersion {
+ args = append(args, 1)
+ var colName = col.Name
+ session.afterClosures = append(session.afterClosures, func(bean interface{}) {
+ col := table.GetColumn(colName)
+ setColumnInt(bean, col, 1)
+ })
+ } else {
+ arg, err := session.value2Interface(col, fieldValue)
+ if err != nil {
+ return 0, err
+ }
+ args = append(args, arg)
+ }
+
+ colNames = append(colNames, col.Name)
+ cols = append(cols, col)
+ colPlaces = append(colPlaces, "?")
+ }
+ } else {
+ // Later elements reuse the column list decided by element 0.
+ for _, col := range cols {
+ ptrFieldValue, err := col.ValueOfV(&vv)
+ if err != nil {
+ return 0, err
+ }
+ fieldValue := *ptrFieldValue
+
+ if col.IsAutoIncrement && isZero(fieldValue.Interface()) {
+ continue
+ }
+ if col.MapType == core.ONLYFROMDB {
+ continue
+ }
+ if col.IsDeleted {
+ continue
+ }
+ if session.statement.omitColumnMap.contain(col.Name) {
+ continue
+ }
+ if len(session.statement.columnMap) > 0 && !session.statement.columnMap.contain(col.Name) {
+ continue
+ }
+ if (col.IsCreated || col.IsUpdated) && session.statement.UseAutoTime {
+ val, t := session.engine.nowTime(col)
+ args = append(args, val)
+
+ var colName = col.Name
+ session.afterClosures = append(session.afterClosures, func(bean interface{}) {
+ col := table.GetColumn(colName)
+ setColumnTime(bean, col, t)
+ })
+ } else if col.IsVersion && session.statement.checkVersion {
+ args = append(args, 1)
+ var colName = col.Name
+ session.afterClosures = append(session.afterClosures, func(bean interface{}) {
+ col := table.GetColumn(colName)
+ setColumnInt(bean, col, 1)
+ })
+ } else {
+ arg, err := session.value2Interface(col, fieldValue)
+ if err != nil {
+ return 0, err
+ }
+ args = append(args, arg)
+ }
+
+ colPlaces = append(colPlaces, "?")
+ }
+ }
+ colMultiPlaces = append(colMultiPlaces, strings.Join(colPlaces, ", "))
+ }
+ cleanupProcessorsClosures(&session.beforeClosures)
+
+ var sql string
+ if session.engine.dialect.DBType() == core.ORACLE {
+ // Oracle has no multi-VALUES syntax; build an INSERT ALL statement.
+ temp := fmt.Sprintf(") INTO %s (%v) VALUES (",
+ session.engine.Quote(tableName),
+ quoteColumns(colNames, session.engine.Quote, ","))
+ sql = fmt.Sprintf("INSERT ALL INTO %s (%v) VALUES (%v) SELECT 1 FROM DUAL",
+ session.engine.Quote(tableName),
+ quoteColumns(colNames, session.engine.Quote, ","),
+ strings.Join(colMultiPlaces, temp))
+ } else {
+ sql = fmt.Sprintf("INSERT INTO %s (%v) VALUES (%v)",
+ session.engine.Quote(tableName),
+ quoteColumns(colNames, session.engine.Quote, ","),
+ strings.Join(colMultiPlaces, "),("))
+ }
+ res, err := session.exec(sql, args...)
+ if err != nil {
+ return 0, err
+ }
+
+ session.cacheInsert(tableName)
+
+ lenAfterClosures := len(session.afterClosures)
+ for i := 0; i < size; i++ {
+ elemValue := reflect.Indirect(sliceValue.Index(i)).Addr().Interface()
+
+ // handle AfterInsertProcessor
+ if session.isAutoCommit {
+ // !nashtsai! does user expect it's same slice to passed closure when using Before()/After() when insert multi??
+ for _, closure := range session.afterClosures {
+ closure(elemValue)
+ }
+ if processor, ok := interface{}(elemValue).(AfterInsertProcessor); ok {
+ processor.AfterInsert()
+ }
+ } else {
+ // In a transaction: defer the after-hooks until commit.
+ if lenAfterClosures > 0 {
+ if value, has := session.afterInsertBeans[elemValue]; has && value != nil {
+ *value = append(*value, session.afterClosures...)
+ } else {
+ afterClosures := make([]func(interface{}), lenAfterClosures)
+ copy(afterClosures, session.afterClosures)
+ session.afterInsertBeans[elemValue] = &afterClosures
+ }
+ } else {
+ if _, ok := interface{}(elemValue).(AfterInsertProcessor); ok {
+ session.afterInsertBeans[elemValue] = nil
+ }
+ }
+ }
+ }
+
+ cleanupProcessorsClosures(&session.afterClosures)
+ return res.RowsAffected()
+}
+
+// InsertMulti insert multiple records.
+// rowsSlicePtr must be (a pointer to) a slice; an empty slice is a no-op
+// that returns (0, nil) rather than an error.
+func (session *Session) InsertMulti(rowsSlicePtr interface{}) (int64, error) {
+ if session.isAutoClose {
+ defer session.Close()
+ }
+
+ sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr))
+ if sliceValue.Kind() != reflect.Slice {
+ return 0, ErrParamsType
+
+ }
+
+ if sliceValue.Len() <= 0 {
+ return 0, nil
+ }
+
+ return session.innerInsertMulti(rowsSlicePtr)
+}
+
+// innerInsert generates and executes the INSERT statement for a single
+// bean, runs the registered before/after closures and processors, and,
+// where the dialect allows it, writes the database-generated
+// auto-increment id back into the bean. It returns the affected row count.
+func (session *Session) innerInsert(bean interface{}) (int64, error) {
+	if err := session.statement.setRefBean(bean); err != nil {
+		return 0, err
+	}
+	if len(session.statement.TableName()) <= 0 {
+		return 0, ErrTableNotFound
+	}
+
+	table := session.statement.RefTable
+
+	// handle BeforeInsertProcessor
+	for _, closure := range session.beforeClosures {
+		closure(bean)
+	}
+	cleanupProcessorsClosures(&session.beforeClosures) // cleanup after used
+
+	if processor, ok := interface{}(bean).(BeforeInsertProcessor); ok {
+		processor.BeforeInsert()
+	}
+
+	colNames, args, err := session.genInsertColumns(bean)
+	if err != nil {
+		return 0, err
+	}
+	// insert expr columns, override if exists
+	exprColumns := session.statement.getExpr()
+	exprColVals := make([]string, 0, len(exprColumns))
+	for _, v := range exprColumns {
+		// remove the expr columns
+		for i, colName := range colNames {
+			if colName == v.colName {
+				colNames = append(colNames[:i], colNames[i+1:]...)
+				args = append(args[:i], args[i+1:]...)
+			}
+		}
+
+		// append expr column to the end
+		colNames = append(colNames, v.colName)
+		exprColVals = append(exprColVals, v.expr)
+	}
+
+	// one "?, " placeholder per plain column; expr columns supply their own SQL
+	colPlaces := strings.Repeat("?, ", len(colNames)-len(exprColumns))
+	if len(exprColVals) > 0 {
+		colPlaces = colPlaces + strings.Join(exprColVals, ", ")
+	} else {
+		if len(colPlaces) > 0 {
+			// drop the trailing ", "
+			colPlaces = colPlaces[0 : len(colPlaces)-2]
+		}
+	}
+
+	var sqlStr string
+	var tableName = session.statement.TableName()
+	var output string
+	// MSSQL returns the generated id through an OUTPUT clause
+	if session.engine.dialect.DBType() == core.MSSQL && len(table.AutoIncrement) > 0 {
+		output = fmt.Sprintf(" OUTPUT Inserted.%s", table.AutoIncrement)
+	}
+	if len(colPlaces) > 0 {
+		sqlStr = fmt.Sprintf("INSERT INTO %s (%v)%s VALUES (%v)",
+			session.engine.Quote(tableName),
+			quoteColumns(colNames, session.engine.Quote, ","),
+			output,
+			colPlaces)
+	} else {
+		// no explicit columns: use the dialect's empty-row insert form
+		if session.engine.dialect.DBType() == core.MYSQL {
+			sqlStr = fmt.Sprintf("INSERT INTO %s VALUES ()", session.engine.Quote(tableName))
+		} else {
+			sqlStr = fmt.Sprintf("INSERT INTO %s%s DEFAULT VALUES", session.engine.Quote(tableName), output)
+		}
+	}
+
+	if len(table.AutoIncrement) > 0 && session.engine.dialect.DBType() == core.POSTGRES {
+		sqlStr = sqlStr + " RETURNING " + session.engine.Quote(table.AutoIncrement)
+	}
+
+	// runs the after-insert closures/processors immediately when
+	// auto-committing, otherwise queues them on the session until commit
+	handleAfterInsertProcessorFunc := func(bean interface{}) {
+		if session.isAutoCommit {
+			for _, closure := range session.afterClosures {
+				closure(bean)
+			}
+			if processor, ok := interface{}(bean).(AfterInsertProcessor); ok {
+				processor.AfterInsert()
+			}
+		} else {
+			lenAfterClosures := len(session.afterClosures)
+			if lenAfterClosures > 0 {
+				if value, has := session.afterInsertBeans[bean]; has && value != nil {
+					*value = append(*value, session.afterClosures...)
+				} else {
+					afterClosures := make([]func(interface{}), lenAfterClosures)
+					copy(afterClosures, session.afterClosures)
+					session.afterInsertBeans[bean] = &afterClosures
+				}
+
+			} else {
+				// no closures, but remember the bean so AfterInsert still fires
+				if _, ok := interface{}(bean).(AfterInsertProcessor); ok {
+					session.afterInsertBeans[bean] = nil
+				}
+			}
+		}
+		cleanupProcessorsClosures(&session.afterClosures) // cleanup after used
+	}
+
+	// for postgres, many of them didn't implement lastInsertId, so we should
+	// implemented it ourself.
+	if session.engine.dialect.DBType() == core.ORACLE && len(table.AutoIncrement) > 0 {
+		// NOTE(review): the sequence name "seq_atable" appears hard-coded —
+		// verify it matches the table's actual sequence name.
+		res, err := session.queryBytes("select seq_atable.currval from dual", args...)
+		if err != nil {
+			return 0, err
+		}
+
+		defer handleAfterInsertProcessorFunc(bean)
+
+		session.cacheInsert(tableName)
+
+		if table.Version != "" && session.statement.checkVersion {
+			verValue, err := table.VersionColumn().ValueOf(bean)
+			if err != nil {
+				session.engine.logger.Error(err)
+			} else if verValue.IsValid() && verValue.CanSet() {
+				session.incrVersionFieldValue(verValue)
+			}
+		}
+
+		if len(res) < 1 {
+			return 0, errors.New("insert no error but not returned id")
+		}
+
+		idByte := res[0][table.AutoIncrement]
+		id, err := strconv.ParseInt(string(idByte), 10, 64)
+		if err != nil || id <= 0 {
+			return 1, err
+		}
+
+		aiValue, err := table.AutoIncrColumn().ValueOf(bean)
+		if err != nil {
+			session.engine.logger.Error(err)
+		}
+
+		if aiValue == nil || !aiValue.IsValid() || !aiValue.CanSet() {
+			return 1, nil
+		}
+
+		aiValue.Set(int64ToIntValue(id, aiValue.Type()))
+
+		return 1, nil
+	} else if len(table.AutoIncrement) > 0 && (session.engine.dialect.DBType() == core.POSTGRES || session.engine.dialect.DBType() == core.MSSQL) {
+		// the id comes back as a result row (RETURNING / OUTPUT), not via
+		// LastInsertId, so run the insert as a query
+		res, err := session.queryBytes(sqlStr, args...)
+
+		if err != nil {
+			return 0, err
+		}
+		defer handleAfterInsertProcessorFunc(bean)
+
+		session.cacheInsert(tableName)
+
+		if table.Version != "" && session.statement.checkVersion {
+			verValue, err := table.VersionColumn().ValueOf(bean)
+			if err != nil {
+				session.engine.logger.Error(err)
+			} else if verValue.IsValid() && verValue.CanSet() {
+				session.incrVersionFieldValue(verValue)
+			}
+		}
+
+		if len(res) < 1 {
+			return 0, errors.New("insert successfully but not returned id")
+		}
+
+		idByte := res[0][table.AutoIncrement]
+		id, err := strconv.ParseInt(string(idByte), 10, 64)
+		if err != nil || id <= 0 {
+			return 1, err
+		}
+
+		aiValue, err := table.AutoIncrColumn().ValueOf(bean)
+		if err != nil {
+			session.engine.logger.Error(err)
+		}
+
+		if aiValue == nil || !aiValue.IsValid() || !aiValue.CanSet() {
+			return 1, nil
+		}
+
+		aiValue.Set(int64ToIntValue(id, aiValue.Type()))
+
+		return 1, nil
+	} else {
+		// all other dialects: plain exec plus LastInsertId
+		res, err := session.exec(sqlStr, args...)
+		if err != nil {
+			return 0, err
+		}
+
+		defer handleAfterInsertProcessorFunc(bean)
+
+		session.cacheInsert(tableName)
+
+		if table.Version != "" && session.statement.checkVersion {
+			verValue, err := table.VersionColumn().ValueOf(bean)
+			if err != nil {
+				session.engine.logger.Error(err)
+			} else if verValue.IsValid() && verValue.CanSet() {
+				session.incrVersionFieldValue(verValue)
+			}
+		}
+
+		if table.AutoIncrement == "" {
+			return res.RowsAffected()
+		}
+
+		var id int64
+		id, err = res.LastInsertId()
+		if err != nil || id <= 0 {
+			// driver gave no usable id; still report the affected rows
+			return res.RowsAffected()
+		}
+
+		aiValue, err := table.AutoIncrColumn().ValueOf(bean)
+		if err != nil {
+			session.engine.logger.Error(err)
+		}
+
+		if aiValue == nil || !aiValue.IsValid() || !aiValue.CanSet() {
+			return res.RowsAffected()
+		}
+
+		aiValue.Set(int64ToIntValue(id, aiValue.Type()))
+
+		return res.RowsAffected()
+	}
+}
+
+// InsertOne inserts exactly one record built from the given struct (or
+// pointer to struct) and returns the inserted row count and any error.
+func (session *Session) InsertOne(bean interface{}) (int64, error) {
+	defer func() {
+		if session.isAutoClose {
+			session.Close()
+		}
+	}()
+	return session.innerInsert(bean)
+}
+
+// cacheInsert invalidates the cached id lists for table after an insert.
+// It is a no-op when caching is disabled on the statement or no cacher is
+// configured for the table; it always returns nil.
+func (session *Session) cacheInsert(table string) error {
+	if session.statement.UseCache {
+		if cacher := session.engine.getCacher(table); cacher != nil {
+			session.engine.logger.Debug("[cache] clear sql:", table)
+			cacher.ClearIds(table)
+		}
+	}
+	return nil
+}
+
+// genInsertColumns generates insert needed columns
+// It walks the bean's mapped columns and returns, in parallel order, the
+// column names to insert and their argument values. A column is skipped
+// when it is read-only (ONLYFROMDB), a soft-delete marker, omitted or not
+// selected on the statement, or registered as an incr/decr expression.
+func (session *Session) genInsertColumns(bean interface{}) ([]string, []interface{}, error) {
+	table := session.statement.RefTable
+	colNames := make([]string, 0, len(table.ColumnsSeq()))
+	args := make([]interface{}, 0, len(table.ColumnsSeq()))
+
+	for _, col := range table.Columns() {
+		if col.MapType == core.ONLYFROMDB {
+			continue
+		}
+
+		if col.IsDeleted {
+			continue
+		}
+
+		if session.statement.omitColumnMap.contain(col.Name) {
+			continue
+		}
+
+		if len(session.statement.columnMap) > 0 && !session.statement.columnMap.contain(col.Name) {
+			continue
+		}
+
+		if _, ok := session.statement.incrColumns[col.Name]; ok {
+			continue
+		} else if _, ok := session.statement.decrColumns[col.Name]; ok {
+			continue
+		}
+
+		fieldValuePtr, err := col.ValueOf(bean)
+		if err != nil {
+			return nil, nil, err
+		}
+		fieldValue := *fieldValuePtr
+
+		// zero-valued auto-increment fields are left for the database to fill
+		if col.IsAutoIncrement {
+			switch fieldValue.Type().Kind() {
+			case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int, reflect.Int64:
+				if fieldValue.Int() == 0 {
+					continue
+				}
+			case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint, reflect.Uint64:
+				if fieldValue.Uint() == 0 {
+					continue
+				}
+			case reflect.String:
+				if len(fieldValue.String()) == 0 {
+					continue
+				}
+			case reflect.Ptr:
+				if fieldValue.Pointer() == 0 {
+					continue
+				}
+			}
+		}
+
+		// !evalphobia! set fieldValue as nil when column is nullable and zero-value
+		if _, ok := getFlagForColumn(session.statement.nullableMap, col); ok {
+			if col.Nullable && isZero(fieldValue.Interface()) {
+				var nilValue *int
+				fieldValue = reflect.ValueOf(nilValue)
+			}
+		}
+
+		if (col.IsCreated || col.IsUpdated) && session.statement.UseAutoTime /*&& isZero(fieldValue.Interface())*/ {
+			// if time is non-empty, then set to auto time
+			val, t := session.engine.nowTime(col)
+			args = append(args, val)
+
+			// write the generated timestamp back into the bean after insert;
+			// capture the name so the closure is independent of the loop var
+			var colName = col.Name
+			session.afterClosures = append(session.afterClosures, func(bean interface{}) {
+				col := table.GetColumn(colName)
+				setColumnTime(bean, col, t)
+			})
+		} else if col.IsVersion && session.statement.checkVersion {
+			// new rows always start at version 1
+			args = append(args, 1)
+		} else {
+			arg, err := session.value2Interface(col, fieldValue)
+			if err != nil {
+				return colNames, args, err
+			}
+			args = append(args, arg)
+		}
+
+		colNames = append(colNames, col.Name)
+	}
+	return colNames, args, nil
+}
+
+// insertMapInterface inserts one record built from a column->value map.
+// Column names are sorted so the generated SQL is deterministic. It
+// returns the number of affected rows.
+func (session *Session) insertMapInterface(m map[string]interface{}) (int64, error) {
+	if len(m) == 0 {
+		return 0, ErrParamsType
+	}
+
+	var columns = make([]string, 0, len(m))
+	for k := range m {
+		columns = append(columns, k)
+	}
+	sort.Strings(columns)
+
+	qm := strings.Repeat("?,", len(columns))
+	qm = "(" + qm[:len(qm)-1] + ")"
+
+	tableName := session.statement.TableName()
+	if len(tableName) <= 0 {
+		return 0, ErrTableNotFound
+	}
+
+	// NOTE(review): columns are quoted with backticks regardless of dialect;
+	// presumably fine for MySQL — verify against other databases.
+	var sql = fmt.Sprintf("INSERT INTO %s (`%s`) VALUES %s", session.engine.Quote(tableName), strings.Join(columns, "`,`"), qm)
+	var args = make([]interface{}, 0, len(m))
+	for _, colName := range columns {
+		args = append(args, m[colName])
+	}
+
+	res, err := session.exec(sql, args...)
+	if err != nil {
+		return 0, err
+	}
+
+	// invalidate the id cache only after the insert actually succeeded;
+	// clearing it before exec allowed a concurrent read to repopulate the
+	// cache with pre-insert data that then went stale.
+	if err := session.cacheInsert(tableName); err != nil {
+		return 0, err
+	}
+
+	affected, err := res.RowsAffected()
+	if err != nil {
+		return 0, err
+	}
+	return affected, nil
+}
+
+// insertMapString inserts one record built from a column->string map.
+// Column names are sorted so the generated SQL is deterministic. It
+// returns the number of affected rows.
+func (session *Session) insertMapString(m map[string]string) (int64, error) {
+	if len(m) == 0 {
+		return 0, ErrParamsType
+	}
+
+	var columns = make([]string, 0, len(m))
+	for k := range m {
+		columns = append(columns, k)
+	}
+	sort.Strings(columns)
+
+	qm := strings.Repeat("?,", len(columns))
+	qm = "(" + qm[:len(qm)-1] + ")"
+
+	tableName := session.statement.TableName()
+	if len(tableName) <= 0 {
+		return 0, ErrTableNotFound
+	}
+
+	// NOTE(review): columns are quoted with backticks regardless of dialect;
+	// presumably fine for MySQL — verify against other databases.
+	var sql = fmt.Sprintf("INSERT INTO %s (`%s`) VALUES %s", session.engine.Quote(tableName), strings.Join(columns, "`,`"), qm)
+	var args = make([]interface{}, 0, len(m))
+	for _, colName := range columns {
+		args = append(args, m[colName])
+	}
+
+	res, err := session.exec(sql, args...)
+	if err != nil {
+		return 0, err
+	}
+
+	// invalidate the id cache only after the insert actually succeeded;
+	// clearing it before exec allowed a concurrent read to repopulate the
+	// cache with pre-insert data that then went stale.
+	if err := session.cacheInsert(tableName); err != nil {
+		return 0, err
+	}
+
+	affected, err := res.RowsAffected()
+	if err != nil {
+		return 0, err
+	}
+	return affected, nil
+}
diff --git a/vendor/github.com/go-xorm/xorm/session_iterate.go b/vendor/github.com/go-xorm/xorm/session_iterate.go
new file mode 100644
index 0000000..ca996c2
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/session_iterate.go
@@ -0,0 +1,100 @@
+// Copyright 2016 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import "reflect"
+
+// IterFunc is the callback invoked by Iterate for each record; idx is the
+// zero-based record index and bean is the scanned record.
+type IterFunc func(idx int, bean interface{}) error
+
+// Rows returns an sql.Rows-compatible forward iterator for reading records
+// one by one; bean's non-empty fields are used as query conditions.
+func (session *Session) Rows(bean interface{}) (*Rows, error) {
+	return newRows(session, bean)
+}
+
+// Iterate record by record handle records from table, condiBean's non-empty
+// fields are conditions. beans could be []Struct, []*Struct, map[int64]Struct
+// map[int64]*Struct
+// fun receives each record with its zero-based index; a non-nil error from
+// fun aborts the iteration.
+func (session *Session) Iterate(bean interface{}, fun IterFunc) error {
+	if session.isAutoClose {
+		defer session.Close()
+	}
+
+	if session.statement.lastError != nil {
+		return session.statement.lastError
+	}
+
+	// buffered mode pages through results instead of holding one cursor
+	if session.statement.bufferSize > 0 {
+		return session.bufferIterate(bean, fun)
+	}
+
+	rows, err := session.Rows(bean)
+	if err != nil {
+		return err
+	}
+	defer rows.Close()
+
+	i := 0
+	for rows.Next() {
+		b := reflect.New(rows.beanType).Interface()
+		err = rows.Scan(b)
+		if err != nil {
+			return err
+		}
+		err = fun(i, b)
+		if err != nil {
+			return err
+		}
+		i++
+	}
+	// NOTE(review): err is nil here unless Scan/fun failed above, so a cursor
+	// error that ended rows.Next() early is silently dropped — verify whether
+	// Rows exposes an Err() that should be returned instead.
+	return err
+}
+
+// BufferSize sets the buffersize for iterate
+// A size greater than zero makes Iterate fetch records in pages of that size.
+func (session *Session) BufferSize(size int) *Session {
+	session.statement.bufferSize = size
+	return session
+}
+
+// bufferIterate repeatedly fetches pages of at most bufferSize records via
+// LIMIT/OFFSET queries and feeds every record to fun, honouring any LimitN
+// configured on the statement.
+func (session *Session) bufferIterate(bean interface{}, fun IterFunc) error {
+	if session.isAutoClose {
+		defer session.Close()
+	}
+
+	var bufferSize = session.statement.bufferSize
+	var limit = session.statement.LimitN
+	// never fetch more than the overall limit in one page
+	if limit > 0 && bufferSize > limit {
+		bufferSize = limit
+	}
+	var start = session.statement.Start
+	v := rValue(bean)
+	sliceType := reflect.SliceOf(v.Type())
+	var idx = 0
+	for {
+		slice := reflect.New(sliceType)
+		if err := session.Limit(bufferSize, start).find(slice.Interface(), bean); err != nil {
+			return err
+		}
+
+		for i := 0; i < slice.Elem().Len(); i++ {
+			if err := fun(idx, slice.Elem().Index(i).Addr().Interface()); err != nil {
+				return err
+			}
+			idx++
+		}
+
+		start = start + slice.Elem().Len()
+		// shrink the final page so we never read past the limit
+		if limit > 0 && idx+bufferSize > limit {
+			bufferSize = limit - idx
+		}
+
+		// stop on a short (or empty) page or once the limit is reached
+		if bufferSize <= 0 || slice.Elem().Len() < bufferSize || idx == limit {
+			break
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/go-xorm/xorm/session_query.go b/vendor/github.com/go-xorm/xorm/session_query.go
new file mode 100644
index 0000000..21c00b8
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/session_query.go
@@ -0,0 +1,320 @@
+// Copyright 2017 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "xorm.io/builder"
+ "xorm.io/core"
+)
+
+// genQuerySQL resolves the SQL text and arguments for a query: explicit
+// sqlOrArgs win, then a raw SQL set on the statement, otherwise a SELECT is
+// generated from the statement's conditions and column configuration.
+func (session *Session) genQuerySQL(sqlOrArgs ...interface{}) (string, []interface{}, error) {
+	if len(sqlOrArgs) > 0 {
+		return convertSQLOrArgs(sqlOrArgs...)
+	}
+
+	if session.statement.RawSQL != "" {
+		return session.statement.RawSQL, session.statement.RawParams, nil
+	}
+
+	if len(session.statement.TableName()) <= 0 {
+		return "", nil, ErrTableNotFound
+	}
+
+	// pick the column list: explicit select string first, then group-by
+	// columns, then the generated column list, falling back to "*"
+	var columnStr = session.statement.ColumnStr
+	if len(session.statement.selectStr) > 0 {
+		columnStr = session.statement.selectStr
+	} else {
+		if session.statement.JoinStr == "" {
+			if columnStr == "" {
+				if session.statement.GroupByStr != "" {
+					columnStr = session.engine.quoteColumns(session.statement.GroupByStr)
+				} else {
+					columnStr = session.statement.genColumnStr()
+				}
+			}
+		} else {
+			if columnStr == "" {
+				if session.statement.GroupByStr != "" {
+					columnStr = session.engine.quoteColumns(session.statement.GroupByStr)
+				} else {
+					columnStr = "*"
+				}
+			}
+		}
+		if columnStr == "" {
+			columnStr = "*"
+		}
+	}
+
+	if err := session.statement.processIDParam(); err != nil {
+		return "", nil, err
+	}
+
+	condSQL, condArgs, err := builder.ToSQL(session.statement.cond)
+	if err != nil {
+		return "", nil, err
+	}
+
+	args := append(session.statement.joinArgs, condArgs...)
+	sqlStr, err := session.statement.genSelectSQL(columnStr, condSQL, true, true)
+	if err != nil {
+		return "", nil, err
+	}
+	// for mssql and use limit
+	// (the generated pagination SQL repeats each placeholder once, so the
+	// argument list is doubled when the statement has twice as many "?")
+	qs := strings.Count(sqlStr, "?")
+	if len(args)*2 == qs {
+		args = append(args, args...)
+	}
+
+	return sqlStr, args, nil
+}
+
+// Query executes a raw SQL statement (or the statement built on the
+// session when no arguments are given) and returns the result rows as
+// []map[string][]byte.
+func (session *Session) Query(sqlOrArgs ...interface{}) ([]map[string][]byte, error) {
+	if session.isAutoClose {
+		defer session.Close()
+	}
+
+	stmt, params, err := session.genQuerySQL(sqlOrArgs...)
+	if err != nil {
+		return nil, err
+	}
+	return session.queryBytes(stmt, params...)
+}
+
+// value2String converts a scanned raw value into a string. Signed and
+// unsigned integers, floats, strings, []byte, time.Time-convertible
+// structs, bools and complex numbers are supported; any other kind
+// returns an error.
+func value2String(rawValue *reflect.Value) (str string, err error) {
+	aa := reflect.TypeOf((*rawValue).Interface())
+	vv := reflect.ValueOf((*rawValue).Interface())
+	switch aa.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		str = strconv.FormatInt(vv.Int(), 10)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		str = strconv.FormatUint(vv.Uint(), 10)
+	case reflect.Float32:
+		// format with bitSize 32 so float32 values round-trip cleanly instead
+		// of showing float64 artifacts (e.g. 0.1 -> "0.10000000149011612")
+		str = strconv.FormatFloat(vv.Float(), 'f', -1, 32)
+	case reflect.Float64:
+		str = strconv.FormatFloat(vv.Float(), 'f', -1, 64)
+	case reflect.String:
+		str = vv.String()
+	case reflect.Array, reflect.Slice:
+		switch aa.Elem().Kind() {
+		case reflect.Uint8:
+			data := rawValue.Interface().([]byte)
+			str = string(data)
+			if str == "\x00" {
+				str = "0"
+			}
+		default:
+			err = fmt.Errorf("Unsupported struct type %v", vv.Type().Name())
+		}
+	// time type
+	case reflect.Struct:
+		if aa.ConvertibleTo(core.TimeType) {
+			str = vv.Convert(core.TimeType).Interface().(time.Time).Format(time.RFC3339Nano)
+		} else {
+			err = fmt.Errorf("Unsupported struct type %v", vv.Type().Name())
+		}
+	case reflect.Bool:
+		str = strconv.FormatBool(vv.Bool())
+	case reflect.Complex128, reflect.Complex64:
+		str = fmt.Sprintf("%v", vv.Complex())
+	/* TODO: unsupported types below
+	case reflect.Map:
+	case reflect.Ptr:
+	case reflect.Uintptr:
+	case reflect.UnsafePointer:
+	case reflect.Chan, reflect.Func, reflect.Interface:
+	*/
+	default:
+		err = fmt.Errorf("Unsupported struct type %v", vv.Type().Name())
+	}
+	return
+}
+
+// row2mapStr scans the current row of rows into a map keyed by column
+// name, converting every value to its string form; SQL NULL becomes "".
+func row2mapStr(rows *core.Rows, fields []string) (resultsMap map[string]string, err error) {
+	holders := make([]interface{}, len(fields))
+	for i := range holders {
+		var cell interface{}
+		holders[i] = &cell
+	}
+	if err := rows.Scan(holders...); err != nil {
+		return nil, err
+	}
+
+	out := make(map[string]string, len(fields))
+	for i, name := range fields {
+		raw := reflect.Indirect(reflect.ValueOf(holders[i]))
+		if raw.Interface() == nil {
+			// NULL column: represented as the empty string
+			out[name] = ""
+			continue
+		}
+		s, convErr := value2String(&raw)
+		if convErr != nil {
+			return nil, convErr
+		}
+		out[name] = s
+	}
+	return out, nil
+}
+
+// row2sliceStr scans the current row of rows into a []string in column
+// order, converting every value to its string form; SQL NULL becomes "".
+func row2sliceStr(rows *core.Rows, fields []string) (results []string, err error) {
+	holders := make([]interface{}, len(fields))
+	for i := range holders {
+		var cell interface{}
+		holders[i] = &cell
+	}
+	if err := rows.Scan(holders...); err != nil {
+		return nil, err
+	}
+
+	record := make([]string, 0, len(fields))
+	for i := range fields {
+		raw := reflect.Indirect(reflect.ValueOf(holders[i]))
+		if raw.Interface() == nil {
+			// NULL column: represented as the empty string
+			record = append(record, "")
+			continue
+		}
+		s, convErr := value2String(&raw)
+		if convErr != nil {
+			return nil, convErr
+		}
+		record = append(record, s)
+	}
+	return record, nil
+}
+
+func rows2Strings(rows *core.Rows) (resultsSlice []map[string]string, err error) {
+ fields, err := rows.Columns()
+ if err != nil {
+ return nil, err
+ }
+ for rows.Next() {
+ result, err := row2mapStr(rows, fields)
+ if err != nil {
+ return nil, err
+ }
+ resultsSlice = append(resultsSlice, result)
+ }
+
+ return resultsSlice, nil
+}
+
+func rows2SliceString(rows *core.Rows) (resultsSlice [][]string, err error) {
+ fields, err := rows.Columns()
+ if err != nil {
+ return nil, err
+ }
+ for rows.Next() {
+ record, err := row2sliceStr(rows, fields)
+ if err != nil {
+ return nil, err
+ }
+ resultsSlice = append(resultsSlice, record)
+ }
+
+ return resultsSlice, nil
+}
+
+// QueryString executes a raw SQL statement (or the built-up statement when
+// no arguments are given) and returns the rows as []map[string]string.
+func (session *Session) QueryString(sqlOrArgs ...interface{}) ([]map[string]string, error) {
+	if session.isAutoClose {
+		defer session.Close()
+	}
+
+	stmt, params, err := session.genQuerySQL(sqlOrArgs...)
+	if err != nil {
+		return nil, err
+	}
+
+	rows, err := session.queryRows(stmt, params...)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	return rows2Strings(rows)
+}
+
+// QuerySliceString executes a raw SQL statement (or the built-up statement
+// when no arguments are given) and returns the rows as [][]string.
+func (session *Session) QuerySliceString(sqlOrArgs ...interface{}) ([][]string, error) {
+	if session.isAutoClose {
+		defer session.Close()
+	}
+
+	stmt, params, err := session.genQuerySQL(sqlOrArgs...)
+	if err != nil {
+		return nil, err
+	}
+
+	rows, err := session.queryRows(stmt, params...)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	return rows2SliceString(rows)
+}
+
+// row2mapInterface scans the current row into a map of column name to the
+// driver-provided value (no string conversion is applied).
+func row2mapInterface(rows *core.Rows, fields []string) (resultsMap map[string]interface{}, err error) {
+	holders := make([]interface{}, len(fields))
+	for i := range holders {
+		var cell interface{}
+		holders[i] = &cell
+	}
+	if err := rows.Scan(holders...); err != nil {
+		return nil, err
+	}
+
+	out := make(map[string]interface{}, len(fields))
+	for i, name := range fields {
+		out[name] = reflect.Indirect(reflect.ValueOf(holders[i])).Interface()
+	}
+	return out, nil
+}
+
+func rows2Interfaces(rows *core.Rows) (resultsSlice []map[string]interface{}, err error) {
+ fields, err := rows.Columns()
+ if err != nil {
+ return nil, err
+ }
+ for rows.Next() {
+ result, err := row2mapInterface(rows, fields)
+ if err != nil {
+ return nil, err
+ }
+ resultsSlice = append(resultsSlice, result)
+ }
+
+ return resultsSlice, nil
+}
+
+// QueryInterface executes a raw SQL statement (or the built-up statement
+// when no arguments are given) and returns the rows as
+// []map[string]interface{}.
+func (session *Session) QueryInterface(sqlOrArgs ...interface{}) ([]map[string]interface{}, error) {
+	if session.isAutoClose {
+		defer session.Close()
+	}
+
+	stmt, params, err := session.genQuerySQL(sqlOrArgs...)
+	if err != nil {
+		return nil, err
+	}
+
+	rows, err := session.queryRows(stmt, params...)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	return rows2Interfaces(rows)
+}
diff --git a/vendor/github.com/go-xorm/xorm/session_raw.go b/vendor/github.com/go-xorm/xorm/session_raw.go
new file mode 100644
index 0000000..67648ef
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/session_raw.go
@@ -0,0 +1,227 @@
+// Copyright 2016 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "database/sql"
+ "reflect"
+ "time"
+
+ "xorm.io/builder"
+ "xorm.io/core"
+)
+
+// queryPreprocess runs every dialect filter over the SQL text in place and
+// records the final SQL and its arguments for LastSQL reporting.
+func (session *Session) queryPreprocess(sqlStr *string, paramStr ...interface{}) {
+	for _, f := range session.engine.dialect.Filters() {
+		*sqlStr = f.Do(*sqlStr, session.engine.dialect, session.statement.RefTable)
+	}
+	session.lastSQL = *sqlStr
+	session.lastSQLArgs = paramStr
+}
+
+// queryRows executes a read query and returns the raw *core.Rows. It
+// applies dialect filters, optionally logs the SQL (with timing), and
+// routes the query to the slave DB (group sessions), a cached prepared
+// statement, the plain DB, or the open transaction, depending on state.
+func (session *Session) queryRows(sqlStr string, args ...interface{}) (*core.Rows, error) {
+	defer session.resetStatement()
+
+	session.queryPreprocess(&sqlStr, args...)
+
+	if session.engine.showSQL {
+		if session.engine.showExecTime {
+			b4ExecTime := time.Now()
+			defer func() {
+				execDuration := time.Since(b4ExecTime)
+				if len(args) > 0 {
+					session.engine.logger.Infof("[SQL] %s %#v - took: %v", sqlStr, args, execDuration)
+				} else {
+					session.engine.logger.Infof("[SQL] %s - took: %v", sqlStr, execDuration)
+				}
+			}()
+		} else {
+			if len(args) > 0 {
+				session.engine.logger.Infof("[SQL] %v %#v", sqlStr, args)
+			} else {
+				session.engine.logger.Infof("[SQL] %v", sqlStr)
+			}
+		}
+	}
+
+	if session.isAutoCommit {
+		var db *core.DB
+		if session.sessionType == groupSession {
+			// reads go to a slave when the session belongs to an engine group
+			db = session.engine.engineGroup.Slave().DB()
+		} else {
+			db = session.DB()
+		}
+
+		if session.prepareStmt {
+			// don't clear stmt since session will cache them
+			stmt, err := session.doPrepare(db, sqlStr)
+			if err != nil {
+				return nil, err
+			}
+
+			rows, err := stmt.QueryContext(session.ctx, args...)
+			if err != nil {
+				return nil, err
+			}
+			return rows, nil
+		}
+
+		rows, err := db.QueryContext(session.ctx, sqlStr, args...)
+		if err != nil {
+			return nil, err
+		}
+		return rows, nil
+	}
+
+	// inside a transaction: query through the tx handle
+	rows, err := session.tx.QueryContext(session.ctx, sqlStr, args...)
+	if err != nil {
+		return nil, err
+	}
+	return rows, nil
+}
+
+// queryRow executes sqlStr and wraps the result in a single-row *core.Row;
+// any query error is carried inside the Row rather than returned here.
+func (session *Session) queryRow(sqlStr string, args ...interface{}) *core.Row {
+	return core.NewRow(session.queryRows(sqlStr, args...))
+}
+
+// value2Bytes converts a scanned raw value to the []byte form of its
+// string representation.
+func value2Bytes(rawValue *reflect.Value) ([]byte, error) {
+	s, err := value2String(rawValue)
+	if err != nil {
+		return nil, err
+	}
+	return []byte(s), nil
+}
+
+// row2map scans the current row into a map of column name to raw []byte;
+// SQL NULL becomes an empty (non-nil) slice.
+func row2map(rows *core.Rows, fields []string) (resultsMap map[string][]byte, err error) {
+	holders := make([]interface{}, len(fields))
+	for i := range holders {
+		var cell interface{}
+		holders[i] = &cell
+	}
+	if err := rows.Scan(holders...); err != nil {
+		return nil, err
+	}
+
+	out := make(map[string][]byte, len(fields))
+	for i, name := range fields {
+		raw := reflect.Indirect(reflect.ValueOf(holders[i]))
+		if raw.Interface() == nil {
+			out[name] = []byte{}
+			continue
+		}
+		data, convErr := value2Bytes(&raw)
+		if convErr != nil {
+			// a conversion failure aborts the whole row
+			return nil, convErr
+		}
+		out[name] = data
+	}
+	return out, nil
+}
+
+func rows2maps(rows *core.Rows) (resultsSlice []map[string][]byte, err error) {
+ fields, err := rows.Columns()
+ if err != nil {
+ return nil, err
+ }
+ for rows.Next() {
+ result, err := row2map(rows, fields)
+ if err != nil {
+ return nil, err
+ }
+ resultsSlice = append(resultsSlice, result)
+ }
+
+ return resultsSlice, nil
+}
+
+// queryBytes runs a query and collects every row as map[string][]byte.
+func (session *Session) queryBytes(sqlStr string, args ...interface{}) ([]map[string][]byte, error) {
+	rows, err := session.queryRows(sqlStr, args...)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	results, err := rows2maps(rows)
+	if err != nil {
+		return nil, err
+	}
+	return results, nil
+}
+
+// exec runs a write statement. Dialect filters are applied and the SQL is
+// optionally logged (with timing); the statement goes through the open
+// transaction when one exists, otherwise through a (possibly prepared)
+// connection from the session's DB.
+func (session *Session) exec(sqlStr string, args ...interface{}) (sql.Result, error) {
+	defer session.resetStatement()
+
+	session.queryPreprocess(&sqlStr, args...)
+
+	if session.engine.showSQL {
+		if session.engine.showExecTime {
+			b4ExecTime := time.Now()
+			defer func() {
+				execDuration := time.Since(b4ExecTime)
+				if len(args) > 0 {
+					session.engine.logger.Infof("[SQL] %s %#v - took: %v", sqlStr, args, execDuration)
+				} else {
+					session.engine.logger.Infof("[SQL] %s - took: %v", sqlStr, execDuration)
+				}
+			}()
+		} else {
+			if len(args) > 0 {
+				session.engine.logger.Infof("[SQL] %v %#v", sqlStr, args)
+			} else {
+				session.engine.logger.Infof("[SQL] %v", sqlStr)
+			}
+		}
+	}
+
+	if !session.isAutoCommit {
+		return session.tx.ExecContext(session.ctx, sqlStr, args...)
+	}
+
+	if session.prepareStmt {
+		// prepared statements are cached on the session, so don't close stmt here
+		stmt, err := session.doPrepare(session.DB(), sqlStr)
+		if err != nil {
+			return nil, err
+		}
+
+		res, err := stmt.ExecContext(session.ctx, args...)
+		if err != nil {
+			return nil, err
+		}
+		return res, nil
+	}
+
+	return session.DB().ExecContext(session.ctx, sqlStr, args...)
+}
+
+// convertSQLOrArgs normalizes Exec/Query input: a leading string is taken
+// as the SQL with the remaining elements as arguments, and a builder
+// (value or pointer) is compiled to SQL. The caller must pass at least one
+// element.
+func convertSQLOrArgs(sqlOrArgs ...interface{}) (string, []interface{}, error) {
+	switch v := sqlOrArgs[0].(type) {
+	case string:
+		return v, sqlOrArgs[1:], nil
+	case *builder.Builder:
+		return v.ToSQL()
+	case builder.Builder:
+		return v.ToSQL()
+	}
+
+	return "", nil, ErrUnSupportedType
+}
+
+// Exec executes a raw write statement. The first element of sqlOrArgs is
+// the SQL string or a builder; any remaining elements are statement
+// arguments.
+func (session *Session) Exec(sqlOrArgs ...interface{}) (sql.Result, error) {
+	if session.isAutoClose {
+		defer session.Close()
+	}
+
+	if len(sqlOrArgs) == 0 {
+		return nil, ErrUnSupportedType
+	}
+
+	stmt, params, err := convertSQLOrArgs(sqlOrArgs...)
+	if err != nil {
+		return nil, err
+	}
+	return session.exec(stmt, params...)
+}
diff --git a/vendor/github.com/go-xorm/xorm/session_schema.go b/vendor/github.com/go-xorm/xorm/session_schema.go
new file mode 100644
index 0000000..da5c885
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/session_schema.go
@@ -0,0 +1,421 @@
+// Copyright 2016 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "database/sql"
+ "fmt"
+ "strings"
+
+ "xorm.io/core"
+)
+
+// Ping test if database is ok
+func (session *Session) Ping() error {
+ if session.isAutoClose {
+ defer session.Close()
+ }
+
+ session.engine.logger.Infof("PING DATABASE %v", session.engine.DriverName())
+ return session.DB().PingContext(session.ctx)
+}
+
+// CreateTable create a table according a bean
+func (session *Session) CreateTable(bean interface{}) error {
+ if session.isAutoClose {
+ defer session.Close()
+ }
+
+ return session.createTable(bean)
+}
+
+func (session *Session) createTable(bean interface{}) error {
+ if err := session.statement.setRefBean(bean); err != nil {
+ return err
+ }
+
+ sqlStr := session.statement.genCreateTableSQL()
+ _, err := session.exec(sqlStr)
+ return err
+}
+
+// CreateIndexes create indexes
+func (session *Session) CreateIndexes(bean interface{}) error {
+ if session.isAutoClose {
+ defer session.Close()
+ }
+
+ return session.createIndexes(bean)
+}
+
+func (session *Session) createIndexes(bean interface{}) error {
+ if err := session.statement.setRefBean(bean); err != nil {
+ return err
+ }
+
+ sqls := session.statement.genIndexSQL()
+ for _, sqlStr := range sqls {
+ _, err := session.exec(sqlStr)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// CreateUniques create uniques
+func (session *Session) CreateUniques(bean interface{}) error {
+ if session.isAutoClose {
+ defer session.Close()
+ }
+ return session.createUniques(bean)
+}
+
+func (session *Session) createUniques(bean interface{}) error {
+ if err := session.statement.setRefBean(bean); err != nil {
+ return err
+ }
+
+ sqls := session.statement.genUniqueSQL()
+ for _, sqlStr := range sqls {
+ _, err := session.exec(sqlStr)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// DropIndexes drop indexes
+func (session *Session) DropIndexes(bean interface{}) error {
+ if session.isAutoClose {
+ defer session.Close()
+ }
+
+ return session.dropIndexes(bean)
+}
+
+func (session *Session) dropIndexes(bean interface{}) error {
+ if err := session.statement.setRefBean(bean); err != nil {
+ return err
+ }
+
+ sqls := session.statement.genDelIndexSQL()
+ for _, sqlStr := range sqls {
+ _, err := session.exec(sqlStr)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// DropTable drop table will drop table if exist, if drop failed, it will return error
+func (session *Session) DropTable(beanOrTableName interface{}) error {
+ if session.isAutoClose {
+ defer session.Close()
+ }
+
+ return session.dropTable(beanOrTableName)
+}
+
+func (session *Session) dropTable(beanOrTableName interface{}) error {
+ tableName := session.engine.TableName(beanOrTableName)
+ var needDrop = true
+ if !session.engine.dialect.SupportDropIfExists() {
+ sqlStr, args := session.engine.dialect.TableCheckSql(tableName)
+ results, err := session.queryBytes(sqlStr, args...)
+ if err != nil {
+ return err
+ }
+ needDrop = len(results) > 0
+ }
+
+ if needDrop {
+ sqlStr := session.engine.Dialect().DropTableSql(session.engine.TableName(tableName, true))
+ _, err := session.exec(sqlStr)
+ return err
+ }
+ return nil
+}
+
+// IsTableExist if a table is exist
+func (session *Session) IsTableExist(beanOrTableName interface{}) (bool, error) {
+ if session.isAutoClose {
+ defer session.Close()
+ }
+
+ tableName := session.engine.TableName(beanOrTableName)
+
+ return session.isTableExist(tableName)
+}
+
+func (session *Session) isTableExist(tableName string) (bool, error) {
+ sqlStr, args := session.engine.dialect.TableCheckSql(tableName)
+ results, err := session.queryBytes(sqlStr, args...)
+ return len(results) > 0, err
+}
+
+// IsTableEmpty if table have any records
+func (session *Session) IsTableEmpty(bean interface{}) (bool, error) {
+ if session.isAutoClose {
+ defer session.Close()
+ }
+ return session.isTableEmpty(session.engine.TableName(bean))
+}
+
+func (session *Session) isTableEmpty(tableName string) (bool, error) {
+ var total int64
+ sqlStr := fmt.Sprintf("select count(*) from %s", session.engine.Quote(session.engine.TableName(tableName, true)))
+ err := session.queryRow(sqlStr).Scan(&total)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ err = nil
+ }
+ return true, err
+ }
+
+ return total == 0, nil
+}
+
+// find if index is exist according cols
+func (session *Session) isIndexExist2(tableName string, cols []string, unique bool) (bool, error) {
+ indexes, err := session.engine.dialect.GetIndexes(tableName)
+ if err != nil {
+ return false, err
+ }
+
+ for _, index := range indexes {
+ if sliceEq(index.Cols, cols) {
+ if unique {
+ return index.Type == core.UniqueType, nil
+ }
+ return index.Type == core.IndexType, nil
+ }
+ }
+ return false, nil
+}
+
+func (session *Session) addColumn(colName string) error {
+ col := session.statement.RefTable.GetColumn(colName)
+ sql, args := session.statement.genAddColumnStr(col)
+ _, err := session.exec(sql, args...)
+ return err
+}
+
+func (session *Session) addIndex(tableName, idxName string) error {
+ index := session.statement.RefTable.Indexes[idxName]
+ sqlStr := session.engine.dialect.CreateIndexSql(tableName, index)
+ _, err := session.exec(sqlStr)
+ return err
+}
+
+func (session *Session) addUnique(tableName, uqeName string) error {
+ index := session.statement.RefTable.Indexes[uqeName]
+ sqlStr := session.engine.dialect.CreateIndexSql(tableName, index)
+ _, err := session.exec(sqlStr)
+ return err
+}
+
// Sync2 synchronize structs to database tables: missing tables, columns
// and indexes are created; index-type mismatches are dropped and
// recreated; some column-type differences are altered (MySQL/Postgres
// only) while others are merely logged as warnings. Nothing is ever
// dropped from existing columns.
func (session *Session) Sync2(beans ...interface{}) error {
	engine := session.engine

	if session.isAutoClose {
		// Disable auto-close so the many intermediate exec calls below
		// reuse this session; close once at the end instead.
		session.isAutoClose = false
		defer session.Close()
	}

	tables, err := engine.DBMetas()
	if err != nil {
		return err
	}

	// Keep statement state (RefTable/tableName set below) alive across
	// the helper calls; restore normal resetting on exit.
	session.autoResetStatement = false
	defer func() {
		session.autoResetStatement = true
		session.resetStatement()
	}()

	var structTables []*core.Table

	for _, bean := range beans {
		v := rValue(bean)
		table, err := engine.mapType(v)
		if err != nil {
			return err
		}
		structTables = append(structTables, table)
		tbName := engine.TableName(bean)
		tbNameWithSchema := engine.TableName(tbName, true)

		// Find the existing database table matching this struct, if any.
		var oriTable *core.Table
		for _, tb := range tables {
			if strings.EqualFold(tb.Name, tbName) {
				oriTable = tb
				break
			}
		}

		if oriTable == nil {
			// Table absent: create it plus its unique and plain indexes.
			err = session.StoreEngine(session.statement.StoreEngine).createTable(bean)
			if err != nil {
				return err
			}

			err = session.createUniques(bean)
			if err != nil {
				return err
			}

			err = session.createIndexes(bean)
			if err != nil {
				return err
			}
		} else {
			// Table present: reconcile columns, then indexes.
			for _, col := range table.Columns() {
				var oriCol *core.Column
				for _, col2 := range oriTable.Columns() {
					if strings.EqualFold(col.Name, col2.Name) {
						oriCol = col2
						break
					}
				}

				if oriCol != nil {
					expectedType := engine.dialect.SqlType(col)
					curType := engine.dialect.SqlType(oriCol)
					if expectedType != curType {
						if expectedType == core.Text &&
							strings.HasPrefix(curType, core.Varchar) {
							// currently only support mysql & postgres
							if engine.dialect.DBType() == core.MYSQL ||
								engine.dialect.DBType() == core.POSTGRES {
								engine.logger.Infof("Table %s column %s change type from %s to %s\n",
									tbNameWithSchema, col.Name, curType, expectedType)
								_, err = session.exec(engine.dialect.ModifyColumnSql(tbNameWithSchema, col))
							} else {
								engine.logger.Warnf("Table %s column %s db type is %s, struct type is %s\n",
									tbNameWithSchema, col.Name, curType, expectedType)
							}
						} else if strings.HasPrefix(curType, core.Varchar) && strings.HasPrefix(expectedType, core.Varchar) {
							// varchar -> wider varchar: only MySQL is altered here.
							if engine.dialect.DBType() == core.MYSQL {
								if oriCol.Length < col.Length {
									engine.logger.Infof("Table %s column %s change type from varchar(%d) to varchar(%d)\n",
										tbNameWithSchema, col.Name, oriCol.Length, col.Length)
									_, err = session.exec(engine.dialect.ModifyColumnSql(tbNameWithSchema, col))
								}
							}
						} else {
							// Treat "TYPE(...)" vs "TYPE" as equivalent; warn otherwise.
							if !(strings.HasPrefix(curType, expectedType) && curType[len(expectedType)] == '(') {
								engine.logger.Warnf("Table %s column %s db type is %s, struct type is %s",
									tbNameWithSchema, col.Name, curType, expectedType)
							}
						}
					} else if expectedType == core.Varchar {
						if engine.dialect.DBType() == core.MYSQL {
							if oriCol.Length < col.Length {
								engine.logger.Infof("Table %s column %s change type from varchar(%d) to varchar(%d)\n",
									tbNameWithSchema, col.Name, oriCol.Length, col.Length)
								_, err = session.exec(engine.dialect.ModifyColumnSql(tbNameWithSchema, col))
							}
						}
					}
					// Default/nullable drift is only warned about, never altered.
					if col.Default != oriCol.Default {
						engine.logger.Warnf("Table %s Column %s db default is %s, struct default is %s",
							tbName, col.Name, oriCol.Default, col.Default)
					}
					if col.Nullable != oriCol.Nullable {
						engine.logger.Warnf("Table %s Column %s db nullable is %v, struct nullable is %v",
							tbName, col.Name, oriCol.Nullable, col.Nullable)
					}
				} else {
					// Column missing in the database: add it.
					session.statement.RefTable = table
					session.statement.tableName = tbNameWithSchema
					err = session.addColumn(col.Name)
				}
				if err != nil {
					return err
				}
			}

			var foundIndexNames = make(map[string]bool)
			var addedNames = make(map[string]*core.Index)

			for name, index := range table.Indexes {
				var oriIndex *core.Index
				for name2, index2 := range oriTable.Indexes {
					if index.Equal(index2) {
						oriIndex = index2
						foundIndexNames[name2] = true
						break
					}
				}

				if oriIndex != nil {
					// Same columns but wrong kind (unique vs plain): drop and
					// queue for recreation below.
					if oriIndex.Type != index.Type {
						sql := engine.dialect.DropIndexSql(tbNameWithSchema, oriIndex)
						_, err = session.exec(sql)
						if err != nil {
							return err
						}
						oriIndex = nil
					}
				}

				if oriIndex == nil {
					addedNames[name] = index
				}
			}

			// Drop database indexes that no struct index matched.
			for name2, index2 := range oriTable.Indexes {
				if _, ok := foundIndexNames[name2]; !ok {
					sql := engine.dialect.DropIndexSql(tbNameWithSchema, index2)
					_, err = session.exec(sql)
					if err != nil {
						return err
					}
				}
			}

			// Create the missing (or recreated) indexes.
			for name, index := range addedNames {
				if index.Type == core.UniqueType {
					session.statement.RefTable = table
					session.statement.tableName = tbNameWithSchema
					err = session.addUnique(tbNameWithSchema, name)
				} else if index.Type == core.IndexType {
					session.statement.RefTable = table
					session.statement.tableName = tbNameWithSchema
					err = session.addIndex(tbNameWithSchema, name)
				}
				if err != nil {
					return err
				}
			}
		}
	}

	// Report database columns that have no corresponding struct field.
	for _, table := range tables {
		var oriTable *core.Table
		for _, structTable := range structTables {
			if strings.EqualFold(table.Name, session.tbNameNoSchema(structTable)) {
				oriTable = structTable
				break
			}
		}

		if oriTable == nil {
			//engine.LogWarnf("Table %s has no struct to mapping it", table.Name)
			continue
		}

		for _, colName := range table.ColumnsSeq() {
			if oriTable.GetColumn(colName) == nil {
				engine.logger.Warnf("Table %s has column %s but struct has not related field", engine.TableName(table.Name, true), colName)
			}
		}
	}
	return nil
}
diff --git a/vendor/github.com/go-xorm/xorm/session_stats.go b/vendor/github.com/go-xorm/xorm/session_stats.go
new file mode 100644
index 0000000..c2cac83
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/session_stats.go
@@ -0,0 +1,98 @@
+// Copyright 2016 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "database/sql"
+ "errors"
+ "reflect"
+)
+
+// Count counts the records. bean's non-empty fields
+// are conditions.
+func (session *Session) Count(bean ...interface{}) (int64, error) {
+ if session.isAutoClose {
+ defer session.Close()
+ }
+
+ var sqlStr string
+ var args []interface{}
+ var err error
+ if session.statement.RawSQL == "" {
+ sqlStr, args, err = session.statement.genCountSQL(bean...)
+ if err != nil {
+ return 0, err
+ }
+ } else {
+ sqlStr = session.statement.RawSQL
+ args = session.statement.RawParams
+ }
+
+ var total int64
+ err = session.queryRow(sqlStr, args...).Scan(&total)
+ if err == sql.ErrNoRows || err == nil {
+ return total, nil
+ }
+
+ return 0, err
+}
+
+// sum call sum some column. bean's non-empty fields are conditions.
+func (session *Session) sum(res interface{}, bean interface{}, columnNames ...string) error {
+ if session.isAutoClose {
+ defer session.Close()
+ }
+
+ v := reflect.ValueOf(res)
+ if v.Kind() != reflect.Ptr {
+ return errors.New("need a pointer to a variable")
+ }
+
+ var isSlice = v.Elem().Kind() == reflect.Slice
+ var sqlStr string
+ var args []interface{}
+ var err error
+ if len(session.statement.RawSQL) == 0 {
+ sqlStr, args, err = session.statement.genSumSQL(bean, columnNames...)
+ if err != nil {
+ return err
+ }
+ } else {
+ sqlStr = session.statement.RawSQL
+ args = session.statement.RawParams
+ }
+
+ if isSlice {
+ err = session.queryRow(sqlStr, args...).ScanSlice(res)
+ } else {
+ err = session.queryRow(sqlStr, args...).Scan(res)
+ }
+ if err == sql.ErrNoRows || err == nil {
+ return nil
+ }
+ return err
+}
+
+// Sum call sum some column. bean's non-empty fields are conditions.
+func (session *Session) Sum(bean interface{}, columnName string) (res float64, err error) {
+ return res, session.sum(&res, bean, columnName)
+}
+
+// SumInt call sum some column. bean's non-empty fields are conditions.
+func (session *Session) SumInt(bean interface{}, columnName string) (res int64, err error) {
+ return res, session.sum(&res, bean, columnName)
+}
+
+// Sums call sum some columns. bean's non-empty fields are conditions.
+func (session *Session) Sums(bean interface{}, columnNames ...string) ([]float64, error) {
+ var res = make([]float64, len(columnNames), len(columnNames))
+ return res, session.sum(&res, bean, columnNames...)
+}
+
+// SumsInt sum specify columns and return as []int64 instead of []float64
+func (session *Session) SumsInt(bean interface{}, columnNames ...string) ([]int64, error) {
+ var res = make([]int64, len(columnNames), len(columnNames))
+ return res, session.sum(&res, bean, columnNames...)
+}
diff --git a/vendor/github.com/go-xorm/xorm/session_tx.go b/vendor/github.com/go-xorm/xorm/session_tx.go
new file mode 100644
index 0000000..ee3d473
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/session_tx.go
@@ -0,0 +1,83 @@
+// Copyright 2016 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+// Begin a transaction
+func (session *Session) Begin() error {
+ if session.isAutoCommit {
+ tx, err := session.DB().BeginTx(session.ctx, nil)
+ if err != nil {
+ return err
+ }
+ session.isAutoCommit = false
+ session.isCommitedOrRollbacked = false
+ session.tx = tx
+ session.saveLastSQL("BEGIN TRANSACTION")
+ }
+ return nil
+}
+
+// Rollback When using transaction, you can rollback if any error
+func (session *Session) Rollback() error {
+ if !session.isAutoCommit && !session.isCommitedOrRollbacked {
+ session.saveLastSQL(session.engine.dialect.RollBackStr())
+ session.isCommitedOrRollbacked = true
+ session.isAutoCommit = true
+ return session.tx.Rollback()
+ }
+ return nil
+}
+
// Commit When using transaction, Commit will commit all operations.
// On a successful commit it fires the after-insert/update/delete
// closures and processor hooks that were deferred while the
// transaction was open, then clears those queues. It is a no-op when
// no transaction is active or it was already committed/rolled back.
func (session *Session) Commit() error {
	if !session.isAutoCommit && !session.isCommitedOrRollbacked {
		session.saveLastSQL("COMMIT")
		// Flip the flags before committing so a re-entrant call is a no-op.
		session.isCommitedOrRollbacked = true
		session.isAutoCommit = true
		var err error
		if err = session.tx.Commit(); err == nil {
			// handle processors after tx committed
			closureCallFunc := func(closuresPtr *[]func(interface{}), bean interface{}) {
				if closuresPtr != nil {
					for _, closure := range *closuresPtr {
						closure(bean)
					}
				}
			}

			for bean, closuresPtr := range session.afterInsertBeans {
				closureCallFunc(closuresPtr, bean)

				if processor, ok := interface{}(bean).(AfterInsertProcessor); ok {
					processor.AfterInsert()
				}
			}
			for bean, closuresPtr := range session.afterUpdateBeans {
				closureCallFunc(closuresPtr, bean)

				if processor, ok := interface{}(bean).(AfterUpdateProcessor); ok {
					processor.AfterUpdate()
				}
			}
			for bean, closuresPtr := range session.afterDeleteBeans {
				closureCallFunc(closuresPtr, bean)

				if processor, ok := interface{}(bean).(AfterDeleteProcessor); ok {
					processor.AfterDelete()
				}
			}
			// Drop all queued hooks now that they have run.
			cleanUpFunc := func(slices *map[interface{}]*[]func(interface{})) {
				if len(*slices) > 0 {
					*slices = make(map[interface{}]*[]func(interface{}), 0)
				}
			}
			cleanUpFunc(&session.afterInsertBeans)
			cleanUpFunc(&session.afterUpdateBeans)
			cleanUpFunc(&session.afterDeleteBeans)
		}
		return err
	}
	return nil
}
diff --git a/vendor/github.com/go-xorm/xorm/session_update.go b/vendor/github.com/go-xorm/xorm/session_update.go
new file mode 100644
index 0000000..85b0bb0
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/session_update.go
@@ -0,0 +1,512 @@
+// Copyright 2016 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "xorm.io/builder"
+ "xorm.io/core"
+)
+
// cacheUpdate applies an executed UPDATE statement to the beans held in
// the table's cache so cached copies stay consistent with the database.
// It parses sqlStr textually (SET clause, column names, placeholders),
// so it bails out with ErrCacheFailed whenever the statement cannot be
// mapped confidently — and also when there is no table or a transaction
// is active.
func (session *Session) cacheUpdate(table *core.Table, tableName, sqlStr string, args ...interface{}) error {
	if table == nil ||
		session.tx != nil {
		return ErrCacheFailed
	}

	// Split the UPDATE into its head and a SELECT form used to find the
	// affected primary keys.
	oldhead, newsql := session.statement.convertUpdateSQL(sqlStr)
	if newsql == "" {
		return ErrCacheFailed
	}
	for _, filter := range session.engine.dialect.Filters() {
		newsql = filter.Do(newsql, session.engine.dialect, table)
	}
	session.engine.logger.Debug("[cacheUpdate] new sql", oldhead, newsql)

	// nStart counts the placeholders consumed by the SET clause, so
	// args[nStart:] are the WHERE-clause arguments.
	var nStart int
	if len(args) > 0 {
		if strings.Index(sqlStr, "?") > -1 {
			nStart = strings.Count(oldhead, "?")
		} else {
			// only for pq, TODO: if any other databse?
			nStart = strings.Count(oldhead, "$")
		}
	}

	cacher := session.engine.getCacher(tableName)
	session.engine.logger.Debug("[cacheUpdate] get cache sql", newsql, args[nStart:])
	ids, err := core.GetCacheSql(cacher, tableName, newsql, args[nStart:])
	if err != nil {
		// Cache miss: query the database for the affected primary keys.
		rows, err := session.NoCache().queryRows(newsql, args[nStart:]...)
		if err != nil {
			return err
		}
		defer rows.Close()

		ids = make([]core.PK, 0)
		for rows.Next() {
			var res = make([]string, len(table.PrimaryKeys))
			err = rows.ScanSlice(&res)
			if err != nil {
				return err
			}
			var pk core.PK = make([]interface{}, len(table.PrimaryKeys))
			for i, col := range table.PKColumns() {
				if col.SQLType.IsNumeric() {
					n, err := strconv.ParseInt(res[i], 10, 64)
					if err != nil {
						return err
					}
					pk[i] = n
				} else if col.SQLType.IsText() {
					pk[i] = res[i]
				} else {
					return errors.New("not supported")
				}
			}

			ids = append(ids, pk)
		}
		session.engine.logger.Debug("[cacheUpdate] find updated id", ids)
	} /*else {
	    session.engine.LogDebug("[xorm:cacheUpdate] del cached sql:", tableName, newsql, args)
	    cacher.DelIds(tableName, genSqlKey(newsql, args))
	}*/

	// Patch every cached bean matching the affected primary keys.
	for _, id := range ids {
		sid, err := id.ToString()
		if err != nil {
			return err
		}
		if bean := cacher.GetBean(tableName, sid); bean != nil {
			// Extract the SET clause: text between "set" and "where".
			sqls := splitNNoCase(sqlStr, "where", 2)
			if len(sqls) == 0 || len(sqls) > 2 {
				return ErrCacheFailed
			}

			sqls = splitNNoCase(sqls[0], "set", 2)
			if len(sqls) != 2 {
				return ErrCacheFailed
			}
			kvs := strings.Split(strings.TrimSpace(sqls[1]), ",")

			for idx, kv := range kvs {
				sps := strings.SplitN(kv, "=", 2)
				sps2 := strings.Split(sps[0], ".")
				colName := sps2[len(sps2)-1]
				// treat quote prefix, suffix and '`' as quotes
				quotes := append(strings.Split(session.engine.Quote(""), ""), "`")
				if strings.ContainsAny(colName, strings.Join(quotes, "")) {
					colName = strings.TrimSpace(eraseAny(colName, quotes...))
				} else {
					session.engine.logger.Debug("[cacheUpdate] cannot find column", tableName, colName)
					return ErrCacheFailed
				}

				if col := table.GetColumn(colName); col != nil {
					fieldValue, err := col.ValueOf(bean)
					if err != nil {
						session.engine.logger.Error(err)
					} else {
						session.engine.logger.Debug("[cacheUpdate] set bean field", bean, colName, fieldValue.Interface())
						if col.IsVersion && session.statement.checkVersion {
							session.incrVersionFieldValue(fieldValue)
						} else {
							// NOTE(review): assumes SET values map 1:1 to args by
							// position; expression-valued SETs would break this.
							fieldValue.Set(reflect.ValueOf(args[idx]))
						}
					}
				} else {
					session.engine.logger.Errorf("[cacheUpdate] ERROR: column %v is not table %v's",
						colName, table.Name)
				}
			}

			session.engine.logger.Debug("[cacheUpdate] update cache", tableName, id, bean)
			cacher.PutBean(tableName, sid, bean)
		}
	}
	session.engine.logger.Debug("[cacheUpdate] clear cached table sql:", tableName)
	cacher.ClearIds(tableName)
	return nil
}
+
// Update records, bean's non-empty fields are updated contents,
// condiBean' non-empty filds are conditions
// CAUTION:
// 1.bool will defaultly be updated content nor conditions
// You should call UseBool if you have bool to use.
// 2.float32 & float64 may be not inexact as conditions
//
// bean may be a struct (mapped to its table) or a map of
// column-name -> value; anything else yields ErrParamsType. Returns the
// number of affected rows.
func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int64, error) {
	if session.isAutoClose {
		defer session.Close()
	}

	if session.statement.lastError != nil {
		return 0, session.statement.lastError
	}

	v := rValue(bean)
	t := v.Type()

	var colNames []string
	var args []interface{}

	// handle before update processors
	for _, closure := range session.beforeClosures {
		closure(bean)
	}
	cleanupProcessorsClosures(&session.beforeClosures) // cleanup after used
	if processor, ok := interface{}(bean).(BeforeUpdateProcessor); ok {
		processor.BeforeUpdate()
	}
	// --

	var err error
	var isMap = t.Kind() == reflect.Map
	var isStruct = t.Kind() == reflect.Struct
	if isStruct {
		if err := session.statement.setRefBean(bean); err != nil {
			return 0, err
		}

		if len(session.statement.TableName()) <= 0 {
			return 0, ErrTableNotFound
		}

		// Without an explicit column list, derive SET columns from the
		// bean's non-empty fields; otherwise honor the selected columns.
		if session.statement.ColumnStr == "" {
			colNames, args = session.statement.buildUpdates(bean, false, false,
				false, false, true)
		} else {
			colNames, args, err = session.genUpdateColumns(bean)
			if err != nil {
				return 0, err
			}
		}
	} else if isMap {
		// Map bean: every key becomes a "col = ?" assignment.
		colNames = make([]string, 0)
		args = make([]interface{}, 0)
		bValue := reflect.Indirect(reflect.ValueOf(bean))

		for _, v := range bValue.MapKeys() {
			colNames = append(colNames, session.engine.Quote(v.String())+" = ?")
			args = append(args, bValue.MapIndex(v).Interface())
		}
	} else {
		return 0, ErrParamsType
	}

	table := session.statement.RefTable

	// Auto-populate the "updated" timestamp column unless it was
	// explicitly included or omitted by the caller.
	if session.statement.UseAutoTime && table != nil && table.Updated != "" {
		if !session.statement.columnMap.contain(table.Updated) &&
			!session.statement.omitColumnMap.contain(table.Updated) {
			colNames = append(colNames, session.engine.Quote(table.Updated)+" = ?")
			col := table.UpdatedColumn()
			val, t := session.engine.nowTime(col)
			args = append(args, val)

			var colName = col.Name
			if isStruct {
				session.afterClosures = append(session.afterClosures, func(bean interface{}) {
					col := table.GetColumn(colName)
					setColumnTime(bean, col, t)
				})
			}
		}
	}

	// for update action to like "column = column + ?"
	incColumns := session.statement.getInc()
	for _, v := range incColumns {
		colNames = append(colNames, session.engine.Quote(v.colName)+" = "+session.engine.Quote(v.colName)+" + ?")
		args = append(args, v.arg)
	}
	// for update action to like "column = column - ?"
	decColumns := session.statement.getDec()
	for _, v := range decColumns {
		colNames = append(colNames, session.engine.Quote(v.colName)+" = "+session.engine.Quote(v.colName)+" - ?")
		args = append(args, v.arg)
	}
	// for update action to like "column = expression"
	exprColumns := session.statement.getExpr()
	for _, v := range exprColumns {
		colNames = append(colNames, session.engine.Quote(v.colName)+" = "+v.expr)
	}

	if err = session.statement.processIDParam(); err != nil {
		return 0, err
	}

	// Build the automatic WHERE condition from condiBean (map or struct),
	// plus the soft-delete filter when applicable.
	var autoCond builder.Cond
	if !session.statement.noAutoCondition {
		condBeanIsStruct := false
		if len(condiBean) > 0 {
			if c, ok := condiBean[0].(map[string]interface{}); ok {
				autoCond = builder.Eq(c)
			} else {
				ct := reflect.TypeOf(condiBean[0])
				k := ct.Kind()
				if k == reflect.Ptr {
					k = ct.Elem().Kind()
				}
				if k == reflect.Struct {
					var err error
					autoCond, err = session.statement.buildConds(session.statement.RefTable, condiBean[0], true, true, false, true, false)
					if err != nil {
						return 0, err
					}
					condBeanIsStruct = true
				} else {
					return 0, ErrConditionType
				}
			}
		}

		if !condBeanIsStruct && table != nil {
			if col := table.DeletedColumn(); col != nil && !session.statement.unscoped { // tag "deleted" is enabled
				autoCond1 := session.engine.CondDeleted(session.engine.Quote(col.Name))

				if autoCond == nil {
					autoCond = autoCond1
				} else {
					autoCond = autoCond.And(autoCond1)
				}
			}
		}
	}

	st := &session.statement

	var sqlStr string
	var condArgs []interface{}
	var condSQL string
	cond := session.statement.cond.And(autoCond)

	// Optimistic locking: match the current version and bump it by one.
	var doIncVer = (table != nil && table.Version != "" && session.statement.checkVersion)
	var verValue *reflect.Value
	if doIncVer {
		verValue, err = table.VersionColumn().ValueOf(bean)
		if err != nil {
			return 0, err
		}

		cond = cond.And(builder.Eq{session.engine.Quote(table.Version): verValue.Interface()})
		colNames = append(colNames, session.engine.Quote(table.Version)+" = "+session.engine.Quote(table.Version)+" + 1")
	}

	condSQL, condArgs, err = builder.ToSQL(cond)
	if err != nil {
		return 0, err
	}

	if len(condSQL) > 0 {
		condSQL = "WHERE " + condSQL
	}

	if st.OrderStr != "" {
		condSQL = condSQL + fmt.Sprintf(" ORDER BY %v", st.OrderStr)
	}

	var tableName = session.statement.TableName()
	// TODO: Oracle support needed
	// LIMIT on UPDATE is dialect-specific: MySQL supports it natively;
	// SQLite/Postgres emulate via rowid/CTID subqueries; MSSQL uses TOP.
	var top string
	if st.LimitN > 0 {
		if st.Engine.dialect.DBType() == core.MYSQL {
			condSQL = condSQL + fmt.Sprintf(" LIMIT %d", st.LimitN)
		} else if st.Engine.dialect.DBType() == core.SQLITE {
			tempCondSQL := condSQL + fmt.Sprintf(" LIMIT %d", st.LimitN)
			cond = cond.And(builder.Expr(fmt.Sprintf("rowid IN (SELECT rowid FROM %v %v)",
				session.engine.Quote(tableName), tempCondSQL), condArgs...))
			condSQL, condArgs, err = builder.ToSQL(cond)
			if err != nil {
				return 0, err
			}
			if len(condSQL) > 0 {
				condSQL = "WHERE " + condSQL
			}
		} else if st.Engine.dialect.DBType() == core.POSTGRES {
			tempCondSQL := condSQL + fmt.Sprintf(" LIMIT %d", st.LimitN)
			cond = cond.And(builder.Expr(fmt.Sprintf("CTID IN (SELECT CTID FROM %v %v)",
				session.engine.Quote(tableName), tempCondSQL), condArgs...))
			condSQL, condArgs, err = builder.ToSQL(cond)
			if err != nil {
				return 0, err
			}

			if len(condSQL) > 0 {
				condSQL = "WHERE " + condSQL
			}
		} else if st.Engine.dialect.DBType() == core.MSSQL {
			if st.OrderStr != "" && st.Engine.dialect.DBType() == core.MSSQL &&
				table != nil && len(table.PrimaryKeys) == 1 {
				cond = builder.Expr(fmt.Sprintf("%s IN (SELECT TOP (%d) %s FROM %v%v)",
					table.PrimaryKeys[0], st.LimitN, table.PrimaryKeys[0],
					session.engine.Quote(tableName), condSQL), condArgs...)

				condSQL, condArgs, err = builder.ToSQL(cond)
				if err != nil {
					return 0, err
				}
				if len(condSQL) > 0 {
					condSQL = "WHERE " + condSQL
				}
			} else {
				top = fmt.Sprintf("TOP (%d) ", st.LimitN)
			}
		}
	}

	if len(colNames) <= 0 {
		return 0, errors.New("No content found to be updated")
	}

	sqlStr = fmt.Sprintf("UPDATE %v%v SET %v %v",
		top,
		session.engine.Quote(tableName),
		strings.Join(colNames, ", "),
		condSQL)

	res, err := session.exec(sqlStr, append(args, condArgs...)...)
	if err != nil {
		return 0, err
	} else if doIncVer {
		// Keep the in-memory bean's version in sync with the bumped row.
		if verValue != nil && verValue.IsValid() && verValue.CanSet() {
			session.incrVersionFieldValue(verValue)
		}
	}

	// Cached beans may now be stale; drop the whole table's cache.
	if cacher := session.engine.getCacher(tableName); cacher != nil && session.statement.UseCache {
		// session.cacheUpdate(table, tableName, sqlStr, args...)
		session.engine.logger.Debug("[cacheUpdate] clear table ", tableName)
		cacher.ClearIds(tableName)
		cacher.ClearBeans(tableName)
	}

	// handle after update processors
	if session.isAutoCommit {
		for _, closure := range session.afterClosures {
			closure(bean)
		}
		if processor, ok := interface{}(bean).(AfterUpdateProcessor); ok {
			session.engine.logger.Debug("[event]", tableName, " has after update processor")
			processor.AfterUpdate()
		}
	} else {
		// Inside a transaction: queue the hooks to run at Commit time.
		lenAfterClosures := len(session.afterClosures)
		if lenAfterClosures > 0 {
			if value, has := session.afterUpdateBeans[bean]; has && value != nil {
				*value = append(*value, session.afterClosures...)
			} else {
				afterClosures := make([]func(interface{}), lenAfterClosures)
				copy(afterClosures, session.afterClosures)
				// FIXME: if bean is a map type, it will panic because map cannot be as map key
				session.afterUpdateBeans[bean] = &afterClosures
			}

		} else {
			if _, ok := interface{}(bean).(AfterUpdateProcessor); ok {
				session.afterUpdateBeans[bean] = nil
			}
		}
	}
	cleanupProcessorsClosures(&session.afterClosures) // cleanup after used
	// --

	return res.RowsAffected()
}
+
// genUpdateColumns builds the "col = ?" SET fragments and their
// arguments for updating bean, applying the statement's column
// selection, omit list, nullable overrides, auto-time and version
// handling. Returns the fragments and arguments in matching order.
func (session *Session) genUpdateColumns(bean interface{}) ([]string, []interface{}, error) {
	table := session.statement.RefTable
	colNames := make([]string, 0, len(table.ColumnsSeq()))
	args := make([]interface{}, 0, len(table.ColumnsSeq()))

	for _, col := range table.Columns() {
		// Version/created/updated columns bypass the omit list; they are
		// handled by their dedicated branches below.
		if !col.IsVersion && !col.IsCreated && !col.IsUpdated {
			if session.statement.omitColumnMap.contain(col.Name) {
				continue
			}
		}
		if col.MapType == core.ONLYFROMDB {
			continue
		}

		fieldValuePtr, err := col.ValueOf(bean)
		if err != nil {
			return nil, nil, err
		}
		fieldValue := *fieldValuePtr

		// Never overwrite an auto-increment key with its zero value.
		if col.IsAutoIncrement {
			switch fieldValue.Type().Kind() {
			case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int, reflect.Int64:
				if fieldValue.Int() == 0 {
					continue
				}
			case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint, reflect.Uint64:
				if fieldValue.Uint() == 0 {
					continue
				}
			case reflect.String:
				if len(fieldValue.String()) == 0 {
					continue
				}
			case reflect.Ptr:
				if fieldValue.Pointer() == 0 {
					continue
				}
			}
		}

		if (col.IsDeleted && !session.statement.unscoped) || col.IsCreated {
			continue
		}

		// With an explicit column list, skip unselected columns and those
		// already covered by Incr/Decr expressions.
		if len(session.statement.columnMap) > 0 {
			if !session.statement.columnMap.contain(col.Name) {
				continue
			} else if _, ok := session.statement.incrColumns[col.Name]; ok {
				continue
			} else if _, ok := session.statement.decrColumns[col.Name]; ok {
				continue
			}
		}

		// !evalphobia! set fieldValue as nil when column is nullable and zero-value
		if _, ok := getFlagForColumn(session.statement.nullableMap, col); ok {
			if col.Nullable && isZero(fieldValue.Interface()) {
				var nilValue *int
				fieldValue = reflect.ValueOf(nilValue)
			}
		}

		if col.IsUpdated && session.statement.UseAutoTime /*&& isZero(fieldValue.Interface())*/ {
			// if time is non-empty, then set to auto time
			val, t := session.engine.nowTime(col)
			args = append(args, val)

			var colName = col.Name
			// Sync the bean's field with the timestamp written to the DB.
			session.afterClosures = append(session.afterClosures, func(bean interface{}) {
				col := table.GetColumn(colName)
				setColumnTime(bean, col, t)
			})
		} else if col.IsVersion && session.statement.checkVersion {
			args = append(args, 1)
		} else {
			arg, err := session.value2Interface(col, fieldValue)
			if err != nil {
				return colNames, args, err
			}
			args = append(args, arg)
		}

		colNames = append(colNames, session.engine.Quote(col.Name)+" = ?")
	}
	return colNames, args, nil
}
diff --git a/vendor/github.com/go-xorm/xorm/statement.go b/vendor/github.com/go-xorm/xorm/statement.go
new file mode 100644
index 0000000..585378a
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/statement.go
@@ -0,0 +1,1262 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "database/sql/driver"
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+
+ "xorm.io/builder"
+ "xorm.io/core"
+)
+
+// Statement save all the sql info for executing SQL
+type Statement struct {
+ RefTable *core.Table
+ Engine *Engine
+ Start int
+ LimitN int
+ idParam *core.PK
+ OrderStr string
+ JoinStr string
+ joinArgs []interface{}
+ GroupByStr string
+ HavingStr string
+ ColumnStr string
+ selectStr string
+ useAllCols bool
+ OmitStr string
+ AltTableName string
+ tableName string
+ RawSQL string
+ RawParams []interface{}
+ UseCascade bool
+ UseAutoJoin bool
+ StoreEngine string
+ Charset string
+ UseCache bool
+ UseAutoTime bool
+ noAutoCondition bool
+ IsDistinct bool
+ IsForUpdate bool
+ TableAlias string
+ allUseBool bool
+ checkVersion bool
+ unscoped bool
+ columnMap columnMap
+ omitColumnMap columnMap
+ mustColumnMap map[string]bool
+ nullableMap map[string]bool
+ incrColumns map[string]incrParam
+ decrColumns map[string]decrParam
+ exprColumns map[string]exprParam
+ cond builder.Cond
+ bufferSize int
+ context ContextCache
+ lastError error
+}
+
+// Init reset all the statement's fields
+func (statement *Statement) Init() {
+ statement.RefTable = nil
+ statement.Start = 0
+ statement.LimitN = 0
+ statement.OrderStr = ""
+ statement.UseCascade = true
+ statement.JoinStr = ""
+ statement.joinArgs = make([]interface{}, 0)
+ statement.GroupByStr = ""
+ statement.HavingStr = ""
+ statement.ColumnStr = ""
+ statement.OmitStr = ""
+ statement.columnMap = columnMap{}
+ statement.omitColumnMap = columnMap{}
+ statement.AltTableName = ""
+ statement.tableName = ""
+ statement.idParam = nil
+ statement.RawSQL = ""
+ statement.RawParams = make([]interface{}, 0)
+ statement.UseCache = true
+ statement.UseAutoTime = true
+ statement.noAutoCondition = false
+ statement.IsDistinct = false
+ statement.IsForUpdate = false
+ statement.TableAlias = ""
+ statement.selectStr = ""
+ statement.allUseBool = false
+ statement.useAllCols = false
+ statement.mustColumnMap = make(map[string]bool)
+ statement.nullableMap = make(map[string]bool)
+ statement.checkVersion = true
+ statement.unscoped = false
+ statement.incrColumns = make(map[string]incrParam)
+ statement.decrColumns = make(map[string]decrParam)
+ statement.exprColumns = make(map[string]exprParam)
+ statement.cond = builder.NewCond()
+ statement.bufferSize = 0
+ statement.context = nil
+ statement.lastError = nil
+}
+
+// NoAutoCondition if you do not want convert bean's field as query condition, then use this function
+func (statement *Statement) NoAutoCondition(no ...bool) *Statement {
+ statement.noAutoCondition = true
+ if len(no) > 0 {
+ statement.noAutoCondition = no[0]
+ }
+ return statement
+}
+
+// Alias set the table alias
+func (statement *Statement) Alias(alias string) *Statement {
+ statement.TableAlias = alias
+ return statement
+}
+
+// SQL adds raw sql statement. Accepts either a *builder.Builder (rendered
+// to SQL immediately) or a plain string with bind args; any other type
+// records ErrUnSupportedSQLType, surfaced when the statement executes.
+func (statement *Statement) SQL(query interface{}, args ...interface{}) *Statement {
+	switch query.(type) {
+	case (*builder.Builder):
+		var err error
+		statement.RawSQL, statement.RawParams, err = query.(*builder.Builder).ToSQL()
+		if err != nil {
+			statement.lastError = err
+		}
+	case string:
+		statement.RawSQL = query.(string)
+		statement.RawParams = args
+	default:
+		statement.lastError = ErrUnSupportedSQLType
+	}
+
+	return statement
+}
+
+// Where add Where statement
+func (statement *Statement) Where(query interface{}, args ...interface{}) *Statement {
+ return statement.And(query, args...)
+}
+
+// And add Where & and statement. The query may be a SQL-expression string
+// with bind args, a map of column=value equalities, or a builder.Cond
+// (in which case any builder.Cond values in args are AND-ed in as well).
+// Any other type records ErrConditionType on the statement.
+func (statement *Statement) And(query interface{}, args ...interface{}) *Statement {
+	switch query.(type) {
+	case string:
+		cond := builder.Expr(query.(string), args...)
+		statement.cond = statement.cond.And(cond)
+	case map[string]interface{}:
+		cond := builder.Eq(query.(map[string]interface{}))
+		statement.cond = statement.cond.And(cond)
+	case builder.Cond:
+		cond := query.(builder.Cond)
+		statement.cond = statement.cond.And(cond)
+		for _, v := range args {
+			if vv, ok := v.(builder.Cond); ok {
+				statement.cond = statement.cond.And(vv)
+			}
+		}
+	default:
+		statement.lastError = ErrConditionType
+	}
+
+	return statement
+}
+
+// Or add Where & Or statement. The query may be a SQL-expression string
+// with bind args, a map of column=value equalities, or a builder.Cond
+// (in which case any builder.Cond values in args are OR-ed in as well).
+func (statement *Statement) Or(query interface{}, args ...interface{}) *Statement {
+	switch query.(type) {
+	case string:
+		cond := builder.Expr(query.(string), args...)
+		statement.cond = statement.cond.Or(cond)
+	case map[string]interface{}:
+		cond := builder.Eq(query.(map[string]interface{}))
+		statement.cond = statement.cond.Or(cond)
+	case builder.Cond:
+		cond := query.(builder.Cond)
+		statement.cond = statement.cond.Or(cond)
+		for _, v := range args {
+			if vv, ok := v.(builder.Cond); ok {
+				statement.cond = statement.cond.Or(vv)
+			}
+		}
+	default:
+		// Record the unsupported condition type instead of silently
+		// dropping it, consistent with And(); the error surfaces when
+		// the statement is executed.
+		statement.lastError = ErrConditionType
+	}
+	return statement
+}
+
+// In generate "Where column IN (?) " statement
+func (statement *Statement) In(column string, args ...interface{}) *Statement {
+ in := builder.In(statement.Engine.Quote(column), args...)
+ statement.cond = statement.cond.And(in)
+ return statement
+}
+
+// NotIn generate "Where column NOT IN (?) " statement
+func (statement *Statement) NotIn(column string, args ...interface{}) *Statement {
+ notIn := builder.NotIn(statement.Engine.Quote(column), args...)
+ statement.cond = statement.cond.And(notIn)
+ return statement
+}
+
+// setRefValue resolves the table mapping for the given reflect.Value and
+// stores both the mapped table (RefTable) and its resolved table name.
+func (statement *Statement) setRefValue(v reflect.Value) error {
+	var err error
+	statement.RefTable, err = statement.Engine.autoMapType(reflect.Indirect(v))
+	if err != nil {
+		return err
+	}
+	statement.tableName = statement.Engine.TableName(v, true)
+	return nil
+}
+
+// setRefBean resolves the table mapping for the given bean and stores
+// both the mapped table (RefTable) and its resolved table name.
+func (statement *Statement) setRefBean(bean interface{}) error {
+	var err error
+	statement.RefTable, err = statement.Engine.autoMapType(rValue(bean))
+	if err != nil {
+		return err
+	}
+	statement.tableName = statement.Engine.TableName(bean, true)
+	return nil
+}
+
+// buildUpdates auto-generates the UPDATE "SET col = ?" fragments and the
+// matching bind args from a bean's fields. Zero-valued fields are skipped
+// unless forced via MustCols/AllCols; fields flagged via Nullable() may be
+// written as NULL instead.
+// NOTE(review): the includeNil parameter is shadowed by a local of the
+// same name below, and the update parameter appears unused in this body —
+// both look like historical leftovers; confirm against upstream before
+// changing.
+func (statement *Statement) buildUpdates(bean interface{},
+	includeVersion, includeUpdated, includeNil,
+	includeAutoIncr, update bool) ([]string, []interface{}) {
+	engine := statement.Engine
+	table := statement.RefTable
+	allUseBool := statement.allUseBool
+	useAllCols := statement.useAllCols
+	mustColumnMap := statement.mustColumnMap
+	nullableMap := statement.nullableMap
+	columnMap := statement.columnMap
+	omitColumnMap := statement.omitColumnMap
+	unscoped := statement.unscoped
+
+	var colNames = make([]string, 0)
+	var args = make([]interface{}, 0)
+	for _, col := range table.Columns() {
+		// Skip columns that must never appear in a generated SET list:
+		// version/updated/auto-increment (unless explicitly requested),
+		// created, soft-delete (unless unscoped), omitted, unselected,
+		// and read-only (ONLYFROMDB) columns.
+		if !includeVersion && col.IsVersion {
+			continue
+		}
+		if col.IsCreated {
+			continue
+		}
+		if !includeUpdated && col.IsUpdated {
+			continue
+		}
+		if !includeAutoIncr && col.IsAutoIncrement {
+			continue
+		}
+		if col.IsDeleted && !unscoped {
+			continue
+		}
+		if omitColumnMap.contain(col.Name) {
+			continue
+		}
+		if len(columnMap) > 0 && !columnMap.contain(col.Name) {
+			continue
+		}
+
+		if col.MapType == core.ONLYFROMDB {
+			continue
+		}
+
+		fieldValuePtr, err := col.ValueOf(bean)
+		if err != nil {
+			// Field not reachable on this bean: log and move on rather
+			// than aborting the whole update.
+			engine.logger.Error(err)
+			continue
+		}
+
+		fieldValue := *fieldValuePtr
+		fieldType := reflect.TypeOf(fieldValue.Interface())
+		if fieldType == nil {
+			continue
+		}
+
+		requiredField := useAllCols
+		// Shadows the includeNil parameter for the rest of this iteration.
+		includeNil := useAllCols
+
+		if b, ok := getFlagForColumn(mustColumnMap, col); ok {
+			if b {
+				requiredField = true
+			} else {
+				continue
+			}
+		}
+
+		// !evalphobia! set fieldValue as nil when column is nullable and zero-value
+		if b, ok := getFlagForColumn(nullableMap, col); ok {
+			if b && col.Nullable && isZero(fieldValue.Interface()) {
+				var nilValue *int
+				fieldValue = reflect.ValueOf(nilValue)
+				fieldType = reflect.TypeOf(fieldValue.Interface())
+				includeNil = true
+			}
+		}
+
+		var val interface{}
+
+		// Custom conversion hook: a type implementing core.Conversion
+		// supplies its own database representation via ToDB().
+		if fieldValue.CanAddr() {
+			if structConvert, ok := fieldValue.Addr().Interface().(core.Conversion); ok {
+				data, err := structConvert.ToDB()
+				if err != nil {
+					engine.logger.Error(err)
+				} else {
+					val = data
+				}
+				goto APPEND
+			}
+		}
+
+		if structConvert, ok := fieldValue.Interface().(core.Conversion); ok {
+			data, err := structConvert.ToDB()
+			if err != nil {
+				engine.logger.Error(err)
+			} else {
+				val = data
+			}
+			goto APPEND
+		}
+
+		if fieldType.Kind() == reflect.Ptr {
+			if fieldValue.IsNil() {
+				// nil pointer: only emit "col=NULL" when nils were
+				// explicitly requested for this update.
+				if includeNil {
+					args = append(args, nil)
+					colNames = append(colNames, fmt.Sprintf("%v=?", engine.Quote(col.Name)))
+				}
+				continue
+			} else if !fieldValue.IsValid() {
+				continue
+			} else {
+				// dereference ptr type to instance type
+				fieldValue = fieldValue.Elem()
+				fieldType = reflect.TypeOf(fieldValue.Interface())
+				// A non-nil pointer is an explicit value: always include it.
+				requiredField = true
+			}
+		}
+
+		switch fieldType.Kind() {
+		case reflect.Bool:
+			if allUseBool || requiredField {
+				val = fieldValue.Interface()
+			} else {
+				// if a bool in a struct, it will not be as a condition because it default is false,
+				// please use Where() instead
+				continue
+			}
+		case reflect.String:
+			if !requiredField && fieldValue.String() == "" {
+				continue
+			}
+			// for MyString, should convert to string or panic
+			if fieldType.String() != reflect.String.String() {
+				val = fieldValue.String()
+			} else {
+				val = fieldValue.Interface()
+			}
+		case reflect.Int8, reflect.Int16, reflect.Int, reflect.Int32, reflect.Int64:
+			if !requiredField && fieldValue.Int() == 0 {
+				continue
+			}
+			val = fieldValue.Interface()
+		case reflect.Float32, reflect.Float64:
+			if !requiredField && fieldValue.Float() == 0.0 {
+				continue
+			}
+			val = fieldValue.Interface()
+		case reflect.Uint8, reflect.Uint16, reflect.Uint, reflect.Uint32, reflect.Uint64:
+			if !requiredField && fieldValue.Uint() == 0 {
+				continue
+			}
+			// Unsigned values are passed as *int64 to the driver.
+			t := int64(fieldValue.Uint())
+			val = reflect.ValueOf(&t).Interface()
+		case reflect.Struct:
+			if fieldType.ConvertibleTo(core.TimeType) {
+				t := fieldValue.Convert(core.TimeType).Interface().(time.Time)
+				if !requiredField && (t.IsZero() || !fieldValue.IsValid()) {
+					continue
+				}
+				val = engine.formatColTime(col, t)
+			} else if nulType, ok := fieldValue.Interface().(driver.Valuer); ok {
+				val, _ = nulType.Value()
+			} else {
+				if !col.SQLType.IsJson() {
+					// Embedded struct column: use its single primary key
+					// as the stored value (relation-style mapping).
+					engine.autoMapType(fieldValue)
+					if table, ok := engine.Tables[fieldValue.Type()]; ok {
+						if len(table.PrimaryKeys) == 1 {
+							pkField := reflect.Indirect(fieldValue).FieldByName(table.PKColumns()[0].FieldName)
+							// fix non-int pk issues
+							if pkField.IsValid() && (!requiredField && !isZero(pkField.Interface())) {
+								val = pkField.Interface()
+							} else {
+								continue
+							}
+						} else {
+							// TODO: how to handler?
+							panic("not supported")
+						}
+					} else {
+						val = fieldValue.Interface()
+					}
+				} else {
+					// Blank struct could not be as update data
+					if requiredField || !isStructZero(fieldValue) {
+						bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface())
+						if err != nil {
+							panic(fmt.Sprintf("mashal %v failed", fieldValue.Interface()))
+						}
+						if col.SQLType.IsText() {
+							val = string(bytes)
+						} else if col.SQLType.IsBlob() {
+							val = bytes
+						}
+					} else {
+						continue
+					}
+				}
+			}
+		case reflect.Array, reflect.Slice, reflect.Map:
+			if !requiredField {
+				if fieldValue == reflect.Zero(fieldType) {
+					continue
+				}
+				if fieldType.Kind() == reflect.Array {
+					if isArrayValueZero(fieldValue) {
+						continue
+					}
+				} else if fieldValue.IsNil() || !fieldValue.IsValid() || fieldValue.Len() == 0 {
+					continue
+				}
+			}
+
+			// Collections are stored as JSON text, raw bytes ([]byte /
+			// [N]byte), or JSON blobs depending on the column's SQL type.
+			if col.SQLType.IsText() {
+				bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface())
+				if err != nil {
+					engine.logger.Error(err)
+					continue
+				}
+				val = string(bytes)
+			} else if col.SQLType.IsBlob() {
+				var bytes []byte
+				var err error
+				if fieldType.Kind() == reflect.Slice &&
+					fieldType.Elem().Kind() == reflect.Uint8 {
+					if fieldValue.Len() > 0 {
+						val = fieldValue.Bytes()
+					} else {
+						continue
+					}
+				} else if fieldType.Kind() == reflect.Array &&
+					fieldType.Elem().Kind() == reflect.Uint8 {
+					val = fieldValue.Slice(0, 0).Interface()
+				} else {
+					bytes, err = DefaultJSONHandler.Marshal(fieldValue.Interface())
+					if err != nil {
+						engine.logger.Error(err)
+						continue
+					}
+					val = bytes
+				}
+			} else {
+				continue
+			}
+		default:
+			val = fieldValue.Interface()
+		}
+
+	APPEND:
+		args = append(args, val)
+		if col.IsPrimaryKey && engine.dialect.DBType() == "ql" {
+			continue
+		}
+		colNames = append(colNames, fmt.Sprintf("%v = ?", engine.Quote(col.Name)))
+	}
+
+	return colNames, args
+}
+
+func (statement *Statement) needTableName() bool {
+ return len(statement.JoinStr) > 0
+}
+
+func (statement *Statement) colName(col *core.Column, tableName string) string {
+ if statement.needTableName() {
+ var nm = tableName
+ if len(statement.TableAlias) > 0 {
+ nm = statement.TableAlias
+ }
+ return statement.Engine.Quote(nm) + "." + statement.Engine.Quote(col.Name)
+ }
+ return statement.Engine.Quote(col.Name)
+}
+
+// TableName return current tableName
+func (statement *Statement) TableName() string {
+ if statement.AltTableName != "" {
+ return statement.AltTableName
+ }
+
+ return statement.tableName
+}
+
+// ID generate "where id = ? " statement or for composite key "where key1 = ? and key2 = ?"
+func (statement *Statement) ID(id interface{}) *Statement {
+ idValue := reflect.ValueOf(id)
+ idType := reflect.TypeOf(idValue.Interface())
+
+ switch idType {
+ case ptrPkType:
+ if pkPtr, ok := (id).(*core.PK); ok {
+ statement.idParam = pkPtr
+ return statement
+ }
+ case pkType:
+ if pk, ok := (id).(core.PK); ok {
+ statement.idParam = &pk
+ return statement
+ }
+ }
+
+ switch idType.Kind() {
+ case reflect.String:
+ statement.idParam = &core.PK{idValue.Convert(reflect.TypeOf("")).Interface()}
+ return statement
+ }
+
+ statement.idParam = &core.PK{id}
+ return statement
+}
+
+// Incr Generate "Update ... Set column = column + arg" statement
+func (statement *Statement) Incr(column string, arg ...interface{}) *Statement {
+ k := strings.ToLower(column)
+ if len(arg) > 0 {
+ statement.incrColumns[k] = incrParam{column, arg[0]}
+ } else {
+ statement.incrColumns[k] = incrParam{column, 1}
+ }
+ return statement
+}
+
+// Decr Generate "Update ... Set column = column - arg" statement
+func (statement *Statement) Decr(column string, arg ...interface{}) *Statement {
+ k := strings.ToLower(column)
+ if len(arg) > 0 {
+ statement.decrColumns[k] = decrParam{column, arg[0]}
+ } else {
+ statement.decrColumns[k] = decrParam{column, 1}
+ }
+ return statement
+}
+
+// SetExpr Generate "Update ... Set column = {expression}" statement
+func (statement *Statement) SetExpr(column string, expression string) *Statement {
+ k := strings.ToLower(column)
+ statement.exprColumns[k] = exprParam{column, expression}
+ return statement
+}
+
+// Generate "Update ... Set column = column + arg" statement
+func (statement *Statement) getInc() map[string]incrParam {
+ return statement.incrColumns
+}
+
+// Generate "Update ... Set column = column - arg" statement
+func (statement *Statement) getDec() map[string]decrParam {
+ return statement.decrColumns
+}
+
+// Generate "Update ... Set column = {expression}" statement
+func (statement *Statement) getExpr() map[string]exprParam {
+ return statement.exprColumns
+}
+
+// col2NewColsWithQuote strips any existing quote characters (dialect quotes
+// or backticks) from each column name and re-quotes it for the current
+// dialect, so caller-supplied names are normalized.
+func (statement *Statement) col2NewColsWithQuote(columns ...string) []string {
+	newColumns := make([]string, 0)
+	quotes := append(strings.Split(statement.Engine.Quote(""), ""), "`")
+	for _, col := range columns {
+		newColumns = append(newColumns, statement.Engine.Quote(eraseAny(col, quotes...)))
+	}
+	return newColumns
+}
+
+// colmap2NewColsWithQuote returns the columns collected in columnMap with
+// each name quoted for the current dialect. The stored names themselves
+// are left untouched.
+func (statement *Statement) colmap2NewColsWithQuote() []string {
+	quoted := make([]string, 0, len(statement.columnMap))
+	for _, name := range statement.columnMap {
+		quoted = append(quoted, statement.Engine.Quote(name))
+	}
+	return quoted
+}
+
+// Distinct generates "DISTINCT col1, col2 " statement
+func (statement *Statement) Distinct(columns ...string) *Statement {
+ statement.IsDistinct = true
+ statement.Cols(columns...)
+ return statement
+}
+
+// ForUpdate generates "SELECT ... FOR UPDATE" statement
+func (statement *Statement) ForUpdate() *Statement {
+ statement.IsForUpdate = true
+ return statement
+}
+
+// Select replace select
+func (statement *Statement) Select(str string) *Statement {
+ statement.selectStr = str
+ return statement
+}
+
+// Cols generate "col1, col2" statement
+func (statement *Statement) Cols(columns ...string) *Statement {
+ cols := col2NewCols(columns...)
+ for _, nc := range cols {
+ statement.columnMap.add(nc)
+ }
+
+ newColumns := statement.colmap2NewColsWithQuote()
+
+ statement.ColumnStr = strings.Join(newColumns, ", ")
+ statement.ColumnStr = strings.Replace(statement.ColumnStr, statement.Engine.quote("*"), "*", -1)
+ return statement
+}
+
+// AllCols update use only: update all columns
+func (statement *Statement) AllCols() *Statement {
+ statement.useAllCols = true
+ return statement
+}
+
+// MustCols update use only: must update columns
+func (statement *Statement) MustCols(columns ...string) *Statement {
+ newColumns := col2NewCols(columns...)
+ for _, nc := range newColumns {
+ statement.mustColumnMap[strings.ToLower(nc)] = true
+ }
+ return statement
+}
+
+// UseBool indicates that use bool fields as update contents and query contiditions
+func (statement *Statement) UseBool(columns ...string) *Statement {
+ if len(columns) > 0 {
+ statement.MustCols(columns...)
+ } else {
+ statement.allUseBool = true
+ }
+ return statement
+}
+
+// Omit do not use the columns
+func (statement *Statement) Omit(columns ...string) {
+ newColumns := col2NewCols(columns...)
+ for _, nc := range newColumns {
+ statement.omitColumnMap = append(statement.omitColumnMap, nc)
+ }
+ statement.OmitStr = statement.Engine.Quote(strings.Join(newColumns, statement.Engine.Quote(", ")))
+}
+
+// Nullable Update use only: update columns to null when value is nullable and zero-value
+func (statement *Statement) Nullable(columns ...string) {
+ newColumns := col2NewCols(columns...)
+ for _, nc := range newColumns {
+ statement.nullableMap[strings.ToLower(nc)] = true
+ }
+}
+
+// Top generate LIMIT limit statement
+func (statement *Statement) Top(limit int) *Statement {
+ statement.Limit(limit)
+ return statement
+}
+
+// Limit generate LIMIT start, limit statement
+func (statement *Statement) Limit(limit int, start ...int) *Statement {
+ statement.LimitN = limit
+ if len(start) > 0 {
+ statement.Start = start[0]
+ }
+ return statement
+}
+
+// OrderBy generate "Order By order" statement
+func (statement *Statement) OrderBy(order string) *Statement {
+ if len(statement.OrderStr) > 0 {
+ statement.OrderStr += ", "
+ }
+ statement.OrderStr += order
+ return statement
+}
+
+// Desc generate `ORDER BY xx DESC`
+func (statement *Statement) Desc(colNames ...string) *Statement {
+ var buf builder.StringBuilder
+ if len(statement.OrderStr) > 0 {
+ fmt.Fprint(&buf, statement.OrderStr, ", ")
+ }
+ newColNames := statement.col2NewColsWithQuote(colNames...)
+ fmt.Fprintf(&buf, "%v DESC", strings.Join(newColNames, " DESC, "))
+ statement.OrderStr = buf.String()
+ return statement
+}
+
+// Asc provide asc order by query condition, the input parameters are columns.
+func (statement *Statement) Asc(colNames ...string) *Statement {
+ var buf builder.StringBuilder
+ if len(statement.OrderStr) > 0 {
+ fmt.Fprint(&buf, statement.OrderStr, ", ")
+ }
+ newColNames := statement.col2NewColsWithQuote(colNames...)
+ fmt.Fprintf(&buf, "%v ASC", strings.Join(newColNames, " ASC, "))
+ statement.OrderStr = buf.String()
+ return statement
+}
+
+// Table tempororily set table name, the parameter could be a string or a pointer of struct
+func (statement *Statement) Table(tableNameOrBean interface{}) *Statement {
+ v := rValue(tableNameOrBean)
+ t := v.Type()
+ if t.Kind() == reflect.Struct {
+ var err error
+ statement.RefTable, err = statement.Engine.autoMapType(v)
+ if err != nil {
+ statement.Engine.logger.Error(err)
+ return statement
+ }
+ }
+
+ statement.AltTableName = statement.Engine.TableName(tableNameOrBean, true)
+ return statement
+}
+
+// Join The joinOP should be one of INNER, LEFT OUTER, CROSS etc - this will be prepended to JOIN
+func (statement *Statement) Join(joinOP string, tablename interface{}, condition string, args ...interface{}) *Statement {
+ var buf builder.StringBuilder
+ if len(statement.JoinStr) > 0 {
+ fmt.Fprintf(&buf, "%v %v JOIN ", statement.JoinStr, joinOP)
+ } else {
+ fmt.Fprintf(&buf, "%v JOIN ", joinOP)
+ }
+
+ switch tp := tablename.(type) {
+ case builder.Builder:
+ subSQL, subQueryArgs, err := tp.ToSQL()
+ if err != nil {
+ statement.lastError = err
+ return statement
+ }
+ tbs := strings.Split(tp.TableName(), ".")
+ quotes := append(strings.Split(statement.Engine.Quote(""), ""), "`")
+
+ var aliasName = strings.Trim(tbs[len(tbs)-1], strings.Join(quotes, ""))
+ fmt.Fprintf(&buf, "(%s) %s ON %v", subSQL, aliasName, condition)
+ statement.joinArgs = append(statement.joinArgs, subQueryArgs...)
+ case *builder.Builder:
+ subSQL, subQueryArgs, err := tp.ToSQL()
+ if err != nil {
+ statement.lastError = err
+ return statement
+ }
+ tbs := strings.Split(tp.TableName(), ".")
+ quotes := append(strings.Split(statement.Engine.Quote(""), ""), "`")
+
+ var aliasName = strings.Trim(tbs[len(tbs)-1], strings.Join(quotes, ""))
+ fmt.Fprintf(&buf, "(%s) %s ON %v", subSQL, aliasName, condition)
+ statement.joinArgs = append(statement.joinArgs, subQueryArgs...)
+ default:
+ tbName := statement.Engine.TableName(tablename, true)
+ fmt.Fprintf(&buf, "%s ON %v", tbName, condition)
+ }
+
+ statement.JoinStr = buf.String()
+ statement.joinArgs = append(statement.joinArgs, args...)
+ return statement
+}
+
+// GroupBy generate "Group By keys" statement
+func (statement *Statement) GroupBy(keys string) *Statement {
+ statement.GroupByStr = keys
+ return statement
+}
+
+// Having generate "Having conditions" statement
+func (statement *Statement) Having(conditions string) *Statement {
+ statement.HavingStr = fmt.Sprintf("HAVING %v", conditions)
+ return statement
+}
+
+// Unscoped always disable struct tag "deleted"
+func (statement *Statement) Unscoped() *Statement {
+ statement.unscoped = true
+ return statement
+}
+
+func (statement *Statement) genColumnStr() string {
+ if statement.RefTable == nil {
+ return ""
+ }
+
+ var buf builder.StringBuilder
+ columns := statement.RefTable.Columns()
+
+ for _, col := range columns {
+ if statement.omitColumnMap.contain(col.Name) {
+ continue
+ }
+
+ if len(statement.columnMap) > 0 && !statement.columnMap.contain(col.Name) {
+ continue
+ }
+
+ if col.MapType == core.ONLYTODB {
+ continue
+ }
+
+ if buf.Len() != 0 {
+ buf.WriteString(", ")
+ }
+
+ if statement.JoinStr != "" {
+ if statement.TableAlias != "" {
+ buf.WriteString(statement.TableAlias)
+ } else {
+ buf.WriteString(statement.TableName())
+ }
+
+ buf.WriteString(".")
+ }
+
+ statement.Engine.QuoteTo(&buf, col.Name)
+ }
+
+ return buf.String()
+}
+
+func (statement *Statement) genCreateTableSQL() string {
+ return statement.Engine.dialect.CreateTableSql(statement.RefTable, statement.TableName(),
+ statement.StoreEngine, statement.Charset)
+}
+
+func (statement *Statement) genIndexSQL() []string {
+ var sqls []string
+ tbName := statement.TableName()
+ for _, index := range statement.RefTable.Indexes {
+ if index.Type == core.IndexType {
+ sql := statement.Engine.dialect.CreateIndexSql(tbName, index)
+ /*idxTBName := strings.Replace(tbName, ".", "_", -1)
+ idxTBName = strings.Replace(idxTBName, `"`, "", -1)
+ sql := fmt.Sprintf("CREATE INDEX %v ON %v (%v);", quote(indexName(idxTBName, idxName)),
+ quote(tbName), quote(strings.Join(index.Cols, quote(","))))*/
+ sqls = append(sqls, sql)
+ }
+ }
+ return sqls
+}
+
+// uniqueName builds the conventional name of a unique index:
+// "UQE_<table>_<index>".
+func uniqueName(tableName, uqeName string) string {
+	return fmt.Sprintf("UQE_%v_%v", tableName, uqeName)
+}
+
+func (statement *Statement) genUniqueSQL() []string {
+ var sqls []string
+ tbName := statement.TableName()
+ for _, index := range statement.RefTable.Indexes {
+ if index.Type == core.UniqueType {
+ sql := statement.Engine.dialect.CreateIndexSql(tbName, index)
+ sqls = append(sqls, sql)
+ }
+ }
+ return sqls
+}
+
+func (statement *Statement) genDelIndexSQL() []string {
+ var sqls []string
+ tbName := statement.TableName()
+ idxPrefixName := strings.Replace(tbName, `"`, "", -1)
+ idxPrefixName = strings.Replace(idxPrefixName, `.`, "_", -1)
+ for idxName, index := range statement.RefTable.Indexes {
+ var rIdxName string
+ if index.Type == core.UniqueType {
+ rIdxName = uniqueName(idxPrefixName, idxName)
+ } else if index.Type == core.IndexType {
+ rIdxName = indexName(idxPrefixName, idxName)
+ }
+ sql := fmt.Sprintf("DROP INDEX %v", statement.Engine.Quote(statement.Engine.TableName(rIdxName, true)))
+ if statement.Engine.dialect.IndexOnTable() {
+ sql += fmt.Sprintf(" ON %v", statement.Engine.Quote(tbName))
+ }
+ sqls = append(sqls, sql)
+ }
+ return sqls
+}
+
+func (statement *Statement) genAddColumnStr(col *core.Column) (string, []interface{}) {
+ quote := statement.Engine.Quote
+ sql := fmt.Sprintf("ALTER TABLE %v ADD %v", quote(statement.TableName()),
+ col.String(statement.Engine.dialect))
+ if statement.Engine.dialect.DBType() == core.MYSQL && len(col.Comment) > 0 {
+ sql += " COMMENT '" + col.Comment + "'"
+ }
+ sql += ";"
+ return sql, []interface{}{}
+}
+
+func (statement *Statement) buildConds(table *core.Table, bean interface{}, includeVersion bool, includeUpdated bool, includeNil bool, includeAutoIncr bool, addedTableName bool) (builder.Cond, error) {
+ return statement.Engine.buildConds(table, bean, includeVersion, includeUpdated, includeNil, includeAutoIncr, statement.allUseBool, statement.useAllCols,
+ statement.unscoped, statement.mustColumnMap, statement.TableName(), statement.TableAlias, addedTableName)
+}
+
+// mergeConds folds the bean-derived auto conditions (unless disabled via
+// NoAutoCondition) and any ID() primary-key condition into statement.cond.
+func (statement *Statement) mergeConds(bean interface{}) error {
+	if !statement.noAutoCondition {
+		// Qualify column names with the table name only when a JOIN is present.
+		var addedTableName = (len(statement.JoinStr) > 0)
+		autoCond, err := statement.buildConds(statement.RefTable, bean, true, true, false, true, addedTableName)
+		if err != nil {
+			return err
+		}
+		statement.cond = statement.cond.And(autoCond)
+	}
+
+	if err := statement.processIDParam(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// genConds merges the bean's conditions into the statement and renders the
+// final condition tree to a SQL fragment plus bind args.
+func (statement *Statement) genConds(bean interface{}) (string, []interface{}, error) {
+	if err := statement.mergeConds(bean); err != nil {
+		return "", nil, err
+	}
+
+	return builder.ToSQL(statement.cond)
+}
+
+// genGetSQL builds the SELECT statement used by Get. For struct beans the
+// table mapping and bean-derived conditions are applied; for non-struct
+// destinations only an explicit ID() condition is processed. The returned
+// args combine join args with condition args.
+func (statement *Statement) genGetSQL(bean interface{}) (string, []interface{}, error) {
+	v := rValue(bean)
+	isStruct := v.Kind() == reflect.Struct
+	if isStruct {
+		// Propagate mapping failures instead of silently ignoring them;
+		// a nil RefTable would otherwise surface as a confusing error later.
+		if err := statement.setRefBean(bean); err != nil {
+			return "", nil, err
+		}
+	}
+
+	var columnStr = statement.ColumnStr
+	if len(statement.selectStr) > 0 {
+		columnStr = statement.selectStr
+	} else {
+		// TODO: always generate column names, not use * even if join
+		if len(statement.JoinStr) == 0 {
+			if len(columnStr) == 0 {
+				if len(statement.GroupByStr) > 0 {
+					columnStr = statement.Engine.quoteColumns(statement.GroupByStr)
+				} else {
+					columnStr = statement.genColumnStr()
+				}
+			}
+		} else {
+			if len(columnStr) == 0 {
+				if len(statement.GroupByStr) > 0 {
+					columnStr = statement.Engine.quoteColumns(statement.GroupByStr)
+				}
+			}
+		}
+	}
+
+	if len(columnStr) == 0 {
+		columnStr = "*"
+	}
+
+	if isStruct {
+		if err := statement.mergeConds(bean); err != nil {
+			return "", nil, err
+		}
+	} else {
+		if err := statement.processIDParam(); err != nil {
+			return "", nil, err
+		}
+	}
+	condSQL, condArgs, err := builder.ToSQL(statement.cond)
+	if err != nil {
+		return "", nil, err
+	}
+
+	sqlStr, err := statement.genSelectSQL(columnStr, condSQL, true, true)
+	if err != nil {
+		return "", nil, err
+	}
+
+	return sqlStr, append(statement.joinArgs, condArgs...), nil
+}
+
+// genCountSQL builds the SELECT count(...) statement used by Count. When a
+// bean is supplied its mapping and conditions are applied; otherwise only
+// the already-accumulated conditions are rendered. A custom Select() string
+// overrides the generated count expression.
+func (statement *Statement) genCountSQL(beans ...interface{}) (string, []interface{}, error) {
+	var condSQL string
+	var condArgs []interface{}
+	var err error
+	if len(beans) > 0 {
+		// Propagate mapping failures instead of silently ignoring them.
+		if err = statement.setRefBean(beans[0]); err != nil {
+			return "", nil, err
+		}
+		condSQL, condArgs, err = statement.genConds(beans[0])
+	} else {
+		condSQL, condArgs, err = builder.ToSQL(statement.cond)
+	}
+	if err != nil {
+		return "", nil, err
+	}
+
+	var selectSQL = statement.selectStr
+	if len(selectSQL) <= 0 {
+		if statement.IsDistinct {
+			selectSQL = fmt.Sprintf("count(DISTINCT %s)", statement.ColumnStr)
+		} else {
+			selectSQL = "count(*)"
+		}
+	}
+	sqlStr, err := statement.genSelectSQL(selectSQL, condSQL, false, false)
+	if err != nil {
+		return "", nil, err
+	}
+
+	return sqlStr, append(statement.joinArgs, condArgs...), nil
+}
+
+// genSumSQL builds the SELECT COALESCE(sum(col),0), ... statement used by
+// Sum/Sums. Plain column names are quoted; expressions (containing spaces
+// or parentheses) are passed through untouched.
+func (statement *Statement) genSumSQL(bean interface{}, columns ...string) (string, []interface{}, error) {
+	// Propagate mapping failures instead of silently ignoring them.
+	if err := statement.setRefBean(bean); err != nil {
+		return "", nil, err
+	}
+
+	var sumStrs = make([]string, 0, len(columns))
+	for _, colName := range columns {
+		if !strings.Contains(colName, " ") && !strings.Contains(colName, "(") {
+			colName = statement.Engine.Quote(colName)
+		}
+		sumStrs = append(sumStrs, fmt.Sprintf("COALESCE(sum(%s),0)", colName))
+	}
+	sumSelect := strings.Join(sumStrs, ", ")
+
+	condSQL, condArgs, err := statement.genConds(bean)
+	if err != nil {
+		return "", nil, err
+	}
+
+	sqlStr, err := statement.genSelectSQL(sumSelect, condSQL, true, true)
+	if err != nil {
+		return "", nil, err
+	}
+
+	return sqlStr, append(statement.joinArgs, condArgs...), nil
+}
+
+// genSelectSQL assembles the final SELECT statement from the prepared
+// column list and condition fragment. Paging is dialect-specific: standard
+// LIMIT/OFFSET for most databases, TOP plus a "NOT IN (SELECT TOP n ...)"
+// sub-query for MSSQL offsets, and nested ROWNUM sub-selects for Oracle.
+func (statement *Statement) genSelectSQL(columnStr, condSQL string, needLimit, needOrderBy bool) (string, error) {
+	var (
+		distinct string
+		dialect = statement.Engine.Dialect()
+		quote = statement.Engine.Quote
+		fromStr = " FROM "
+		top, mssqlCondi, whereStr string
+	)
+	// count(...) selects must not be prefixed with DISTINCT.
+	if statement.IsDistinct && !strings.HasPrefix(columnStr, "count") {
+		distinct = "DISTINCT "
+	}
+	if len(condSQL) > 0 {
+		whereStr = " WHERE " + condSQL
+	}
+
+	// MSSQL "db..table" names must not be quoted as a whole.
+	if dialect.DBType() == core.MSSQL && strings.Contains(statement.TableName(), "..") {
+		fromStr += statement.TableName()
+	} else {
+		fromStr += quote(statement.TableName())
+	}
+
+	if statement.TableAlias != "" {
+		if dialect.DBType() == core.ORACLE {
+			fromStr += " " + quote(statement.TableAlias)
+		} else {
+			fromStr += " AS " + quote(statement.TableAlias)
+		}
+	}
+	if statement.JoinStr != "" {
+		fromStr = fmt.Sprintf("%v %v", fromStr, statement.JoinStr)
+	}
+
+	if dialect.DBType() == core.MSSQL {
+		if statement.LimitN > 0 {
+			top = fmt.Sprintf("TOP %d ", statement.LimitN)
+		}
+		if statement.Start > 0 {
+			// MSSQL has no OFFSET here: emulate it by excluding the first
+			// Start rows, identified by a key column — the single PK if
+			// present, else a single-column index, else the first column.
+			var column string
+			if len(statement.RefTable.PKColumns()) == 0 {
+				for _, index := range statement.RefTable.Indexes {
+					if len(index.Cols) == 1 {
+						column = index.Cols[0]
+						break
+					}
+				}
+				if len(column) == 0 {
+					column = statement.RefTable.ColumnsSeq()[0]
+				}
+			} else {
+				column = statement.RefTable.PKColumns()[0].Name
+			}
+			if statement.needTableName() {
+				if len(statement.TableAlias) > 0 {
+					column = statement.TableAlias + "." + column
+				} else {
+					column = statement.TableName() + "." + column
+				}
+			}
+
+			var orderStr string
+			if needOrderBy && len(statement.OrderStr) > 0 {
+				orderStr = " ORDER BY " + statement.OrderStr
+			}
+
+			var groupStr string
+			if len(statement.GroupByStr) > 0 {
+				groupStr = " GROUP BY " + statement.GroupByStr
+			}
+			mssqlCondi = fmt.Sprintf("(%s NOT IN (SELECT TOP %d %s%s%s%s%s))",
+				column, statement.Start, column, fromStr, whereStr, orderStr, groupStr)
+		}
+	}
+
+	var buf builder.StringBuilder
+	fmt.Fprintf(&buf, "SELECT %v%v%v%v%v", distinct, top, columnStr, fromStr, whereStr)
+	if len(mssqlCondi) > 0 {
+		if len(whereStr) > 0 {
+			fmt.Fprint(&buf, " AND ", mssqlCondi)
+		} else {
+			fmt.Fprint(&buf, " WHERE ", mssqlCondi)
+		}
+	}
+
+	if statement.GroupByStr != "" {
+		fmt.Fprint(&buf, " GROUP BY ", statement.GroupByStr)
+	}
+	if statement.HavingStr != "" {
+		fmt.Fprint(&buf, " ", statement.HavingStr)
+	}
+	if needOrderBy && statement.OrderStr != "" {
+		fmt.Fprint(&buf, " ORDER BY ", statement.OrderStr)
+	}
+	if needLimit {
+		if dialect.DBType() != core.MSSQL && dialect.DBType() != core.ORACLE {
+			if statement.Start > 0 {
+				fmt.Fprintf(&buf, " LIMIT %v OFFSET %v", statement.LimitN, statement.Start)
+			} else if statement.LimitN > 0 {
+				fmt.Fprint(&buf, " LIMIT ", statement.LimitN)
+			}
+		} else if dialect.DBType() == core.ORACLE {
+			// Oracle paging: wrap the query twice and filter on ROWNUM.
+			if statement.Start != 0 || statement.LimitN != 0 {
+				oldString := buf.String()
+				buf.Reset()
+				rawColStr := columnStr
+				if rawColStr == "*" {
+					rawColStr = "at.*"
+				}
+				fmt.Fprintf(&buf, "SELECT %v FROM (SELECT %v,ROWNUM RN FROM (%v) at WHERE ROWNUM <= %d) aat WHERE RN > %d",
+					columnStr, rawColStr, oldString, statement.Start+statement.LimitN, statement.Start)
+			}
+		}
+	}
+	if statement.IsForUpdate {
+		return dialect.ForUpdateSql(buf.String()), nil
+	}
+
+	return buf.String(), nil
+}
+
+// processIDParam turns the primary-key values stored by ID() into equality
+// conditions on statement.cond, one per PK column, in column order. It is a
+// no-op when no ID was set or no table is mapped, and errors when the value
+// count does not match the table's primary-key count.
+func (statement *Statement) processIDParam() error {
+	if statement.idParam == nil || statement.RefTable == nil {
+		return nil
+	}
+
+	if len(statement.RefTable.PrimaryKeys) != len(*statement.idParam) {
+		return fmt.Errorf("ID condition is error, expect %d primarykeys, there are %d",
+			len(statement.RefTable.PrimaryKeys),
+			len(*statement.idParam),
+		)
+	}
+
+	for i, col := range statement.RefTable.PKColumns() {
+		var colName = statement.colName(col, statement.TableName())
+		statement.cond = statement.cond.And(builder.Eq{colName: (*(statement.idParam))[i]})
+	}
+	return nil
+}
+
+func (statement *Statement) joinColumns(cols []*core.Column, includeTableName bool) string {
+ var colnames = make([]string, len(cols))
+ for i, col := range cols {
+ if includeTableName {
+ colnames[i] = statement.Engine.Quote(statement.TableName()) +
+ "." + statement.Engine.Quote(col.Name)
+ } else {
+ colnames[i] = statement.Engine.Quote(col.Name)
+ }
+ }
+ return strings.Join(colnames, ", ")
+}
+
+func (statement *Statement) convertIDSQL(sqlStr string) string {
+ if statement.RefTable != nil {
+ cols := statement.RefTable.PKColumns()
+ if len(cols) == 0 {
+ return ""
+ }
+
+ colstrs := statement.joinColumns(cols, false)
+ sqls := splitNNoCase(sqlStr, " from ", 2)
+ if len(sqls) != 2 {
+ return ""
+ }
+
+ var top string
+ if statement.LimitN > 0 && statement.Engine.dialect.DBType() == core.MSSQL {
+ top = fmt.Sprintf("TOP %d ", statement.LimitN)
+ }
+
+ newsql := fmt.Sprintf("SELECT %s%s FROM %v", top, colstrs, sqls[1])
+ return newsql
+ }
+ return ""
+}
+
+// convertUpdateSQL splits an UPDATE statement into its SET part and a
+// SELECT that fetches the affected rows' single primary key (used for cache
+// invalidation). Returns ("", "") when the table is unmapped or has a
+// composite primary key. For Postgres/MSSQL the positional placeholders in
+// the WHERE clause are renumbered starting from 1 for the new SELECT.
+func (statement *Statement) convertUpdateSQL(sqlStr string) (string, string) {
+	if statement.RefTable == nil || len(statement.RefTable.PrimaryKeys) != 1 {
+		return "", ""
+	}
+
+	colstrs := statement.joinColumns(statement.RefTable.PKColumns(), true)
+	sqls := splitNNoCase(sqlStr, "where", 2)
+	if len(sqls) != 2 {
+		// No WHERE clause: the SELECT covers the whole table.
+		if len(sqls) == 1 {
+			return sqls[0], fmt.Sprintf("SELECT %v FROM %v",
+				colstrs, statement.Engine.Quote(statement.TableName()))
+		}
+		return "", ""
+	}
+
+	var whereStr = sqls[1]
+
+	// TODO: for postgres only, if any other database?
+	var paraStr string
+	if statement.Engine.dialect.DBType() == core.POSTGRES {
+		paraStr = "$"
+	} else if statement.Engine.dialect.DBType() == core.MSSQL {
+		paraStr = ":"
+	}
+
+	if paraStr != "" {
+		// Rewrite $N / :N placeholders so they are numbered 1..n in the
+		// extracted WHERE clause.
+		if strings.Contains(sqls[1], paraStr) {
+			dollers := strings.Split(sqls[1], paraStr)
+			whereStr = dollers[0]
+			for i, c := range dollers[1:] {
+				ccs := strings.SplitN(c, " ", 2)
+				whereStr += fmt.Sprintf(paraStr+"%v %v", i+1, ccs[1])
+			}
+		}
+	}
+
+	return sqls[0], fmt.Sprintf("SELECT %v FROM %v WHERE %v",
+		colstrs, statement.Engine.Quote(statement.TableName()),
+		whereStr)
+}
diff --git a/vendor/github.com/go-xorm/xorm/syslogger.go b/vendor/github.com/go-xorm/xorm/syslogger.go
new file mode 100644
index 0000000..11ba01e
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/syslogger.go
@@ -0,0 +1,89 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !windows,!nacl,!plan9
+
+package xorm
+
+import (
+ "fmt"
+ "log/syslog"
+
+ "xorm.io/core"
+)
+
+var _ core.ILogger = &SyslogLogger{}
+
+// SyslogLogger is a core.ILogger implementation that forwards all log
+// output to the system syslog daemon via log/syslog. It will be deprecated.
+type SyslogLogger struct {
+	w *syslog.Writer
+	showSQL bool
+}
+
+// NewSyslogLogger creates a SyslogLogger writing to w.
+func NewSyslogLogger(w *syslog.Writer) *SyslogLogger {
+	return &SyslogLogger{w: w}
+}
+
+// Debug log content as Debug
+func (s *SyslogLogger) Debug(v ...interface{}) {
+	s.w.Debug(fmt.Sprint(v...))
+}
+
+// Debugf log content as Debug and format
+func (s *SyslogLogger) Debugf(format string, v ...interface{}) {
+	s.w.Debug(fmt.Sprintf(format, v...))
+}
+
+// Error log content as Error
+func (s *SyslogLogger) Error(v ...interface{}) {
+	s.w.Err(fmt.Sprint(v...))
+}
+
+// Errorf log content as Errorf and format
+func (s *SyslogLogger) Errorf(format string, v ...interface{}) {
+	s.w.Err(fmt.Sprintf(format, v...))
+}
+
+// Info log content as Info
+func (s *SyslogLogger) Info(v ...interface{}) {
+	s.w.Info(fmt.Sprint(v...))
+}
+
+// Infof log content as Infof and format
+func (s *SyslogLogger) Infof(format string, v ...interface{}) {
+	s.w.Info(fmt.Sprintf(format, v...))
+}
+
+// Warn log content as Warn
+func (s *SyslogLogger) Warn(v ...interface{}) {
+	s.w.Warning(fmt.Sprint(v...))
+}
+
+// Warnf log content as Warnf and format
+func (s *SyslogLogger) Warnf(format string, v ...interface{}) {
+	s.w.Warning(fmt.Sprintf(format, v...))
+}
+
+// Level always reports LOG_UNKNOWN: the syslog daemon, not this logger,
+// decides which severities are recorded.
+func (s *SyslogLogger) Level() core.LogLevel {
+	return core.LOG_UNKNOWN
+}
+
+// SetLevel is a no-op: log/syslog does not allow changing the priority
+// level after the syslog.Writer has been created, so l is ignored.
+func (s *SyslogLogger) SetLevel(l core.LogLevel) {}
+
+// ShowSQL enables SQL logging; called with no argument it turns it on,
+// otherwise the first value is used.
+func (s *SyslogLogger) ShowSQL(show ...bool) {
+	if len(show) == 0 {
+		s.showSQL = true
+		return
+	}
+	s.showSQL = show[0]
+}
+
+// IsShowSQL if logging SQL
+func (s *SyslogLogger) IsShowSQL() bool {
+	return s.showSQL
+}
diff --git a/vendor/github.com/go-xorm/xorm/tag.go b/vendor/github.com/go-xorm/xorm/tag.go
new file mode 100644
index 0000000..6feb581
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/tag.go
@@ -0,0 +1,310 @@
+// Copyright 2017 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "xorm.io/core"
+)
+
+// tagContext carries the parsing state handed to each tagHandler while a
+// struct field's xorm tag is being mapped onto a column definition.
+type tagContext struct {
+	tagName string
+	params []string
+	preTag, nextTag string
+	table *core.Table
+	col *core.Column
+	fieldValue reflect.Value
+	isIndex bool
+	isUnique bool
+	indexNames map[string]int
+	engine *Engine
+	hasCacheTag bool
+	hasNoCacheTag bool
+	ignoreNext bool
+}
+
+// tagHandler describes tag handler for XORM. A handler inspects and
+// mutates the tagContext for one tag keyword.
+type tagHandler func(ctx *tagContext) error
+
+var (
+	// defaultTagHandlers enumerates all the default tag handlers, keyed by
+	// the upper-cased tag keyword. init() below adds one entry per SQL type.
+	defaultTagHandlers = map[string]tagHandler{
+		"<-": OnlyFromDBTagHandler,
+		"->": OnlyToDBTagHandler,
+		"PK": PKTagHandler,
+		"NULL": NULLTagHandler,
+		"NOT": IgnoreTagHandler,
+		"AUTOINCR": AutoIncrTagHandler,
+		"DEFAULT": DefaultTagHandler,
+		"CREATED": CreatedTagHandler,
+		"UPDATED": UpdatedTagHandler,
+		"DELETED": DeletedTagHandler,
+		"VERSION": VersionTagHandler,
+		"UTC": UTCTagHandler,
+		"LOCAL": LocalTagHandler,
+		"NOTNULL": NotNullTagHandler,
+		"INDEX": IndexTagHandler,
+		"UNIQUE": UniqueTagHandler,
+		"CACHE": CacheTagHandler,
+		"NOCACHE": NoCacheTagHandler,
+		"COMMENT": CommentTagHandler,
+	}
+)
+
+// init registers SQLTypeTagHandler for every SQL type name known to core,
+// so tags such as VARCHAR(255) or INT are recognized as column types.
+func init() {
+	for k := range core.SqlTypes {
+		defaultTagHandlers[k] = SQLTypeTagHandler
+	}
+}
+
+// IgnoreTagHandler is a no-op, used for tags (such as NOT) that carry no
+// meaning on their own.
+func IgnoreTagHandler(ctx *tagContext) error {
+	return nil
+}
+
+// OnlyFromDBTagHandler ("<-") marks the column as read-only: it is loaded
+// from the database but never written back.
+func OnlyFromDBTagHandler(ctx *tagContext) error {
+	ctx.col.MapType = core.ONLYFROMDB
+	return nil
+}
+
+// OnlyToDBTagHandler ("->") marks the column as write-only: it is written
+// to the database but never loaded into the struct.
+func OnlyToDBTagHandler(ctx *tagContext) error {
+	ctx.col.MapType = core.ONLYTODB
+	return nil
+}
+
+// PKTagHandler describes the primary key tag handler; a primary-key
+// column is implicitly NOT NULL.
+func PKTagHandler(ctx *tagContext) error {
+	ctx.col.IsPrimaryKey = true
+	ctx.col.Nullable = false
+	return nil
+}
+
+// NULLTagHandler marks the column nullable, unless the preceding tag was
+// NOT (the pair "NOT NULL" yields a non-nullable column).
+func NULLTagHandler(ctx *tagContext) error {
+	ctx.col.Nullable = (strings.ToUpper(ctx.preTag) != "NOT")
+	return nil
+}
+
+// NotNullTagHandler marks the column as NOT NULL.
+func NotNullTagHandler(ctx *tagContext) error {
+	ctx.col.Nullable = false
+	return nil
+}
+
+// AutoIncrTagHandler marks the column as auto-incrementing. The commented
+// block below (custom start value via a tag parameter) is intentionally
+// disabled upstream.
+func AutoIncrTagHandler(ctx *tagContext) error {
+	ctx.col.IsAutoIncrement = true
+	/*
+		if len(ctx.params) > 0 {
+			autoStartInt, err := strconv.Atoi(ctx.params[0])
+			if err != nil {
+				return err
+			}
+			ctx.col.AutoIncrStart = autoStartInt
+		} else {
+			ctx.col.AutoIncrStart = 1
+		}
+	*/
+	return nil
+}
+
+// DefaultTagHandler records the column's default value. With a
+// parenthesized parameter that parameter is used; otherwise the next tag
+// token is consumed as the value (ignoreNext tells the parser to skip it).
+func DefaultTagHandler(ctx *tagContext) error {
+	if len(ctx.params) > 0 {
+		ctx.col.Default = ctx.params[0]
+	} else {
+		ctx.col.Default = ctx.nextTag
+		ctx.ignoreNext = true
+	}
+	return nil
+}
+
+// CreatedTagHandler marks the column as a creation timestamp, filled in
+// automatically on insert.
+func CreatedTagHandler(ctx *tagContext) error {
+	ctx.col.IsCreated = true
+	return nil
+}
+
+// VersionTagHandler marks the column as an optimistic-locking version
+// counter, starting at 1.
+func VersionTagHandler(ctx *tagContext) error {
+	ctx.col.IsVersion = true
+	ctx.col.Default = "1"
+	return nil
+}
+
+// UTCTagHandler sets the column's time zone to UTC.
+func UTCTagHandler(ctx *tagContext) error {
+	ctx.col.TimeZone = time.UTC
+	return nil
+}
+
+// LocalTagHandler sets the column's time zone: with no parameter the
+// process-local zone is used, otherwise the parameter is loaded as an
+// IANA time-zone name via time.LoadLocation.
+func LocalTagHandler(ctx *tagContext) error {
+	if len(ctx.params) == 0 {
+		ctx.col.TimeZone = time.Local
+	} else {
+		var err error
+		ctx.col.TimeZone, err = time.LoadLocation(ctx.params[0])
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// UpdatedTagHandler marks the column as a modification timestamp, filled
+// in automatically on update.
+func UpdatedTagHandler(ctx *tagContext) error {
+	ctx.col.IsUpdated = true
+	return nil
+}
+
+// DeletedTagHandler marks the column as a soft-delete timestamp.
+func DeletedTagHandler(ctx *tagContext) error {
+	ctx.col.IsDeleted = true
+	return nil
+}
+
+// IndexTagHandler records an index: with a parameter the column joins the
+// named (possibly shared) index, otherwise an anonymous single-column
+// index is created later from isIndex.
+func IndexTagHandler(ctx *tagContext) error {
+	if len(ctx.params) > 0 {
+		ctx.indexNames[ctx.params[0]] = core.IndexType
+	} else {
+		ctx.isIndex = true
+	}
+	return nil
+}
+
+// UniqueTagHandler records a unique index: with a parameter the column
+// joins the named (possibly shared) unique index, otherwise an anonymous
+// single-column unique index is created later from isUnique.
+func UniqueTagHandler(ctx *tagContext) error {
+	if len(ctx.params) > 0 {
+		ctx.indexNames[ctx.params[0]] = core.UniqueType
+	} else {
+		ctx.isUnique = true
+	}
+	return nil
+}
+
+// CommentTagHandler adds a comment to the column, stripping surrounding
+// single quotes and spaces from the tag parameter.
+func CommentTagHandler(ctx *tagContext) error {
+	if len(ctx.params) > 0 {
+		ctx.col.Comment = strings.Trim(ctx.params[0], "' ")
+	}
+	return nil
+}
+
+// SQLTypeTagHandler sets the column's SQL type from the tag keyword.
+// Parameters are interpreted per type: ENUM/SET take the quoted option
+// list (stored with their tag position as value), every other type takes
+// one or two integer length parameters (e.g. VARCHAR(255), DECIMAL(10,2)).
+func SQLTypeTagHandler(ctx *tagContext) error {
+	ctx.col.SQLType = core.SQLType{Name: ctx.tagName}
+	if len(ctx.params) > 0 {
+		if ctx.tagName == core.Enum {
+			ctx.col.EnumOptions = make(map[string]int)
+			for k, v := range ctx.params {
+				v = strings.TrimSpace(v)
+				v = strings.Trim(v, "'")
+				ctx.col.EnumOptions[v] = k
+			}
+		} else if ctx.tagName == core.Set {
+			ctx.col.SetOptions = make(map[string]int)
+			for k, v := range ctx.params {
+				v = strings.TrimSpace(v)
+				v = strings.Trim(v, "'")
+				ctx.col.SetOptions[v] = k
+			}
+		} else {
+			var err error
+			if len(ctx.params) == 2 {
+				ctx.col.Length, err = strconv.Atoi(ctx.params[0])
+				if err != nil {
+					return err
+				}
+				ctx.col.Length2, err = strconv.Atoi(ctx.params[1])
+				if err != nil {
+					return err
+				}
+			} else if len(ctx.params) == 1 {
+				ctx.col.Length, err = strconv.Atoi(ctx.params[0])
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// ExtendsTagHandler flattens an embedded struct (or pointer-to-struct)
+// field into the current table: every column of the embedded type is
+// added to ctx.table with its FieldName qualified by the outer field's
+// name. An optional tag parameter supplies a column-name prefix and, for
+// pointer embedding, marks the columns nullable. Non-struct kinds fall
+// through to the default case and are silently ignored (TODO upstream).
+func ExtendsTagHandler(ctx *tagContext) error {
+	var fieldValue = ctx.fieldValue
+	var isPtr = false
+	switch fieldValue.Kind() {
+	case reflect.Ptr:
+		f := fieldValue.Type().Elem()
+		if f.Kind() == reflect.Struct {
+			fieldPtr := fieldValue
+			fieldValue = fieldValue.Elem()
+			// A nil pointer has no addressable element; map a fresh zero
+			// value of the struct type instead.
+			if !fieldValue.IsValid() || fieldPtr.IsNil() {
+				fieldValue = reflect.New(f).Elem()
+			}
+		}
+		isPtr = true
+		fallthrough
+	case reflect.Struct:
+		parentTable, err := ctx.engine.mapType(fieldValue)
+		if err != nil {
+			return err
+		}
+		for _, col := range parentTable.Columns() {
+			col.FieldName = fmt.Sprintf("%v.%v", ctx.col.FieldName, col.FieldName)
+
+			var tagPrefix = ctx.col.FieldName
+			if len(ctx.params) > 0 {
+				col.Nullable = isPtr
+				tagPrefix = ctx.params[0]
+				// The embedded PK keeps the outer field's name and loses
+				// its PK status; other columns get the prefix.
+				if col.IsPrimaryKey {
+					col.Name = ctx.col.FieldName
+					col.IsPrimaryKey = false
+				} else {
+					col.Name = fmt.Sprintf("%v%v", tagPrefix, col.Name)
+				}
+			}
+
+			// A nullable embedded column cannot stay auto-increment or PK.
+			if col.Nullable {
+				col.IsAutoIncrement = false
+				col.IsPrimaryKey = false
+			}
+
+			ctx.table.AddColumn(col)
+			for indexName, indexType := range col.Indexes {
+				addIndex(indexName, ctx.table, col, indexType)
+			}
+		}
+	default:
+		//TODO: warning
+	}
+	return nil
+}
+
+// CacheTagHandler flags the table for caching (processed by the caller
+// after all tags are parsed).
+func CacheTagHandler(ctx *tagContext) error {
+	if !ctx.hasCacheTag {
+		ctx.hasCacheTag = true
+	}
+	return nil
+}
+
+// NoCacheTagHandler flags the table as excluded from caching (processed by
+// the caller after all tags are parsed).
+func NoCacheTagHandler(ctx *tagContext) error {
+	if !ctx.hasNoCacheTag {
+		ctx.hasNoCacheTag = true
+	}
+	return nil
+}
diff --git a/vendor/github.com/go-xorm/xorm/test_mssql.sh b/vendor/github.com/go-xorm/xorm/test_mssql.sh
new file mode 100644
index 0000000..7f060cf
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/test_mssql.sh
@@ -0,0 +1 @@
+go test -db=mssql -conn_str="server=localhost;user id=sa;password=yourStrong(!)Password;database=xorm_test"
\ No newline at end of file
diff --git a/vendor/github.com/go-xorm/xorm/test_mssql_cache.sh b/vendor/github.com/go-xorm/xorm/test_mssql_cache.sh
new file mode 100644
index 0000000..76efd6c
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/test_mssql_cache.sh
@@ -0,0 +1 @@
+go test -db=mssql -conn_str="server=192.168.1.58;user id=sa;password=123456;database=xorm_test" -cache=true
\ No newline at end of file
diff --git a/vendor/github.com/go-xorm/xorm/test_mymysql.sh b/vendor/github.com/go-xorm/xorm/test_mymysql.sh
new file mode 100644
index 0000000..f7780d1
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/test_mymysql.sh
@@ -0,0 +1 @@
+go test -db=mymysql -conn_str="xorm_test/root/"
\ No newline at end of file
diff --git a/vendor/github.com/go-xorm/xorm/test_mymysql_cache.sh b/vendor/github.com/go-xorm/xorm/test_mymysql_cache.sh
new file mode 100644
index 0000000..0100286
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/test_mymysql_cache.sh
@@ -0,0 +1 @@
+go test -db=mymysql -conn_str="xorm_test/root/" -cache=true
\ No newline at end of file
diff --git a/vendor/github.com/go-xorm/xorm/test_mysql.sh b/vendor/github.com/go-xorm/xorm/test_mysql.sh
new file mode 100644
index 0000000..650e4ee
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/test_mysql.sh
@@ -0,0 +1 @@
+go test -db=mysql -conn_str="root:@/xorm_test"
\ No newline at end of file
diff --git a/vendor/github.com/go-xorm/xorm/test_mysql_cache.sh b/vendor/github.com/go-xorm/xorm/test_mysql_cache.sh
new file mode 100644
index 0000000..c542e73
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/test_mysql_cache.sh
@@ -0,0 +1 @@
+go test -db=mysql -conn_str="root:@/xorm_test" -cache=true
\ No newline at end of file
diff --git a/vendor/github.com/go-xorm/xorm/test_postgres.sh b/vendor/github.com/go-xorm/xorm/test_postgres.sh
new file mode 100644
index 0000000..dc1152e
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/test_postgres.sh
@@ -0,0 +1 @@
+go test -db=postgres -conn_str="dbname=xorm_test sslmode=disable"
\ No newline at end of file
diff --git a/vendor/github.com/go-xorm/xorm/test_postgres_cache.sh b/vendor/github.com/go-xorm/xorm/test_postgres_cache.sh
new file mode 100644
index 0000000..462fc94
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/test_postgres_cache.sh
@@ -0,0 +1 @@
+go test -db=postgres -conn_str="dbname=xorm_test sslmode=disable" -cache=true
\ No newline at end of file
diff --git a/vendor/github.com/go-xorm/xorm/test_sqlite.sh b/vendor/github.com/go-xorm/xorm/test_sqlite.sh
new file mode 100644
index 0000000..6352b5c
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/test_sqlite.sh
@@ -0,0 +1 @@
+go test -db=sqlite3 -conn_str="./test.db?cache=shared&mode=rwc"
\ No newline at end of file
diff --git a/vendor/github.com/go-xorm/xorm/test_sqlite_cache.sh b/vendor/github.com/go-xorm/xorm/test_sqlite_cache.sh
new file mode 100644
index 0000000..75a054c
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/test_sqlite_cache.sh
@@ -0,0 +1 @@
+go test -db=sqlite3 -conn_str="./test.db?cache=shared&mode=rwc" -cache=true
\ No newline at end of file
diff --git a/vendor/github.com/go-xorm/xorm/test_tidb.sh b/vendor/github.com/go-xorm/xorm/test_tidb.sh
new file mode 100644
index 0000000..03d2d6c
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/test_tidb.sh
@@ -0,0 +1 @@
+go test -db=mysql -conn_str="root:@tcp(localhost:4000)/xorm_test" -ignore_select_update=true
\ No newline at end of file
diff --git a/vendor/github.com/go-xorm/xorm/transaction.go b/vendor/github.com/go-xorm/xorm/transaction.go
new file mode 100644
index 0000000..4104103
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/transaction.go
@@ -0,0 +1,26 @@
+// Copyright 2018 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+// Transaction Execute sql wrapped in a transaction(abbr as tx), tx will automatic commit if no errors occurred
+func (engine *Engine) Transaction(f func(*Session) (interface{}, error)) (interface{}, error) {
+ session := engine.NewSession()
+ defer session.Close()
+
+ if err := session.Begin(); err != nil {
+ return nil, err
+ }
+
+ result, err := f(session)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := session.Commit(); err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
diff --git a/vendor/github.com/go-xorm/xorm/types.go b/vendor/github.com/go-xorm/xorm/types.go
new file mode 100644
index 0000000..c76a546
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/types.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "reflect"
+
+ "xorm.io/core"
+)
+
+var (
+	// Cached reflect.Types of *core.PK and core.PK; presumably used
+	// elsewhere in the package to detect composite primary-key values
+	// during mapping -- usage is outside this file.
+	ptrPkType = reflect.TypeOf(&core.PK{})
+	pkType = reflect.TypeOf(core.PK{})
+)
diff --git a/vendor/github.com/go-xorm/xorm/xorm.go b/vendor/github.com/go-xorm/xorm/xorm.go
new file mode 100644
index 0000000..26d00d2
--- /dev/null
+++ b/vendor/github.com/go-xorm/xorm/xorm.go
@@ -0,0 +1,126 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package xorm
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "reflect"
+ "runtime"
+ "sync"
+ "time"
+
+ "xorm.io/core"
+)
+
+const (
+	// Version is the xorm library version string.
+	Version string = "0.7.0.0504"
+)
+
+// regDrvsNDialects registers the built-in database drivers and their SQL
+// dialects with core. A driver name already registered (e.g. by the user)
+// is left untouched. It always returns true.
+func regDrvsNDialects() bool {
+	providedDrvsNDialects := map[string]struct {
+		dbType core.DbType
+		getDriver func() core.Driver
+		getDialect func() core.Dialect
+	}{
+		"mssql": {"mssql", func() core.Driver { return &odbcDriver{} }, func() core.Dialect { return &mssql{} }},
+		"odbc": {"mssql", func() core.Driver { return &odbcDriver{} }, func() core.Dialect { return &mssql{} }}, // !nashtsai! TODO change this when supporting MS Access
+		"mysql": {"mysql", func() core.Driver { return &mysqlDriver{} }, func() core.Dialect { return &mysql{} }},
+		"mymysql": {"mysql", func() core.Driver { return &mymysqlDriver{} }, func() core.Dialect { return &mysql{} }},
+		"postgres": {"postgres", func() core.Driver { return &pqDriver{} }, func() core.Dialect { return &postgres{} }},
+		"pgx": {"postgres", func() core.Driver { return &pqDriverPgx{} }, func() core.Dialect { return &postgres{} }},
+		"sqlite3": {"sqlite3", func() core.Driver { return &sqlite3Driver{} }, func() core.Dialect { return &sqlite3{} }},
+		"oci8": {"oracle", func() core.Driver { return &oci8Driver{} }, func() core.Dialect { return &oracle{} }},
+		"goracle": {"oracle", func() core.Driver { return &goracleDriver{} }, func() core.Dialect { return &oracle{} }},
+	}
+
+	for driverName, v := range providedDrvsNDialects {
+		if driver := core.QueryDriver(driverName); driver == nil {
+			core.RegisterDriver(driverName, v.getDriver())
+			core.RegisterDialect(v.dbType, v.getDialect)
+		}
+	}
+	return true
+}
+
+// close is the runtime.SetFinalizer target installed by NewEngine: it
+// closes the engine's underlying DB when the Engine is garbage collected.
+// Note that it shadows the builtin close within this package.
+func close(engine *Engine) {
+	engine.Close()
+}
+
+// init registers all built-in drivers and dialects at package load time.
+func init() {
+	regDrvsNDialects()
+}
+
+// NewEngine creates a database engine for the given driver name and data
+// source. The driver must be one registered in regDrvsNDialects (mssql,
+// odbc, mysql, mymysql, postgres, pgx, sqlite3, oci8, goracle) or
+// registered externally via core.RegisterDriver.
+func NewEngine(driverName string, dataSourceName string) (*Engine, error) {
+	driver := core.QueryDriver(driverName)
+	if driver == nil {
+		return nil, fmt.Errorf("Unsupported driver name: %v", driverName)
+	}
+
+	uri, err := driver.Parse(driverName, dataSourceName)
+	if err != nil {
+		return nil, err
+	}
+
+	dialect := core.QueryDialect(uri.DbType)
+	if dialect == nil {
+		return nil, fmt.Errorf("Unsupported dialect type: %v", uri.DbType)
+	}
+
+	db, err := core.Open(driverName, dataSourceName)
+	if err != nil {
+		return nil, err
+	}
+
+	err = dialect.Init(db, uri, driverName, dataSourceName)
+	if err != nil {
+		return nil, err
+	}
+
+	engine := &Engine{
+		db: db,
+		dialect: dialect,
+		Tables: make(map[reflect.Type]*core.Table),
+		mutex: &sync.RWMutex{},
+		TagIdentifier: "xorm",
+		TZLocation: time.Local,
+		tagHandlers: defaultTagHandlers,
+		cachers: make(map[string]core.Cacher),
+		defaultContext: context.Background(),
+	}
+
+	// SQLite stores times in UTC; every other dialect defaults to the
+	// process-local zone.
+	if uri.DbType == core.SQLITE {
+		engine.DatabaseTZ = time.UTC
+	} else {
+		engine.DatabaseTZ = time.Local
+	}
+
+	logger := NewSimpleLogger(os.Stdout)
+	logger.SetLevel(core.LOG_INFO)
+	engine.SetLogger(logger)
+	engine.SetMapper(core.NewCacheMapper(new(core.SnakeMapper)))
+
+	// Ensure the underlying DB is closed if the engine is dropped without
+	// an explicit Close.
+	runtime.SetFinalizer(engine, close)
+
+	return engine, nil
+}
+
+// NewEngineWithParams creates a db manager like NewEngine and passes the
+// given params on to the dialect via SetParams.
+func NewEngineWithParams(driverName string, dataSourceName string, params map[string]string) (*Engine, error) {
+	engine, err := NewEngine(driverName, dataSourceName)
+	if err != nil {
+		// NewEngine returns a nil engine on failure; dereferencing
+		// engine.dialect here would panic.
+		return nil, err
+	}
+	engine.dialect.SetParams(params)
+	return engine, nil
+}
+
+// Clone creates a new, independent engine with the same driver and data
+// source as the receiver.
+func (engine *Engine) Clone() (*Engine, error) {
+	return NewEngine(engine.DriverName(), engine.DataSourceName())
+}
diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore
new file mode 100644
index 0000000..042091d
--- /dev/null
+++ b/vendor/github.com/golang/snappy/.gitignore
@@ -0,0 +1,16 @@
+cmd/snappytool/snappytool
+testdata/bench
+
+# These explicitly listed benchmark data files are for an obsolete version of
+# snappy_test.go.
+testdata/alice29.txt
+testdata/asyoulik.txt
+testdata/fireworks.jpeg
+testdata/geo.protodata
+testdata/html
+testdata/html_x_4
+testdata/kppkn.gtb
+testdata/lcet10.txt
+testdata/paper-100k.pdf
+testdata/plrabn12.txt
+testdata/urls.10K
diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS
new file mode 100644
index 0000000..bcfa195
--- /dev/null
+++ b/vendor/github.com/golang/snappy/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+# Name or Organization
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Damian Gryski
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Rodolfo Carvalho
+Sebastien Binet
diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS
new file mode 100644
index 0000000..931ae31
--- /dev/null
+++ b/vendor/github.com/golang/snappy/CONTRIBUTORS
@@ -0,0 +1,37 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# http://code.google.com/legal/individual-cla-v1.0.html
+# http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+# Name
+
+# Please keep the list sorted.
+
+Damian Gryski
+Jan Mercl <0xjnml@gmail.com>
+Kai Backman
+Marc-Antoine Ruel
+Nigel Tao
+Rob Pike
+Rodolfo Carvalho
+Russ Cox
+Sebastien Binet
diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE
new file mode 100644
index 0000000..6050c10
--- /dev/null
+++ b/vendor/github.com/golang/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README
new file mode 100644
index 0000000..cea1287
--- /dev/null
+++ b/vendor/github.com/golang/snappy/README
@@ -0,0 +1,107 @@
+The Snappy compression format in the Go programming language.
+
+To download and install from source:
+$ go get github.com/golang/snappy
+
+Unless otherwise noted, the Snappy-Go source files are distributed
+under the BSD-style license found in the LICENSE file.
+
+
+
+Benchmarks.
+
+The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
+or so files, the same set used by the C++ Snappy code (github.com/google/snappy
+and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
+3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
+
+"go test -test.bench=."
+
+_UFlat0-8 2.19GB/s ± 0% html
+_UFlat1-8 1.41GB/s ± 0% urls
+_UFlat2-8 23.5GB/s ± 2% jpg
+_UFlat3-8 1.91GB/s ± 0% jpg_200
+_UFlat4-8 14.0GB/s ± 1% pdf
+_UFlat5-8 1.97GB/s ± 0% html4
+_UFlat6-8 814MB/s ± 0% txt1
+_UFlat7-8 785MB/s ± 0% txt2
+_UFlat8-8 857MB/s ± 0% txt3
+_UFlat9-8 719MB/s ± 1% txt4
+_UFlat10-8 2.84GB/s ± 0% pb
+_UFlat11-8 1.05GB/s ± 0% gaviota
+
+_ZFlat0-8 1.04GB/s ± 0% html
+_ZFlat1-8 534MB/s ± 0% urls
+_ZFlat2-8 15.7GB/s ± 1% jpg
+_ZFlat3-8 740MB/s ± 3% jpg_200
+_ZFlat4-8 9.20GB/s ± 1% pdf
+_ZFlat5-8 991MB/s ± 0% html4
+_ZFlat6-8 379MB/s ± 0% txt1
+_ZFlat7-8 352MB/s ± 0% txt2
+_ZFlat8-8 396MB/s ± 1% txt3
+_ZFlat9-8 327MB/s ± 1% txt4
+_ZFlat10-8 1.33GB/s ± 1% pb
+_ZFlat11-8 605MB/s ± 1% gaviota
+
+
+
+"go test -test.bench=. -tags=noasm"
+
+_UFlat0-8 621MB/s ± 2% html
+_UFlat1-8 494MB/s ± 1% urls
+_UFlat2-8 23.2GB/s ± 1% jpg
+_UFlat3-8 1.12GB/s ± 1% jpg_200
+_UFlat4-8 4.35GB/s ± 1% pdf
+_UFlat5-8 609MB/s ± 0% html4
+_UFlat6-8 296MB/s ± 0% txt1
+_UFlat7-8 288MB/s ± 0% txt2
+_UFlat8-8 309MB/s ± 1% txt3
+_UFlat9-8 280MB/s ± 1% txt4
+_UFlat10-8 753MB/s ± 0% pb
+_UFlat11-8 400MB/s ± 0% gaviota
+
+_ZFlat0-8 409MB/s ± 1% html
+_ZFlat1-8 250MB/s ± 1% urls
+_ZFlat2-8 12.3GB/s ± 1% jpg
+_ZFlat3-8 132MB/s ± 0% jpg_200
+_ZFlat4-8 2.92GB/s ± 0% pdf
+_ZFlat5-8 405MB/s ± 1% html4
+_ZFlat6-8 179MB/s ± 1% txt1
+_ZFlat7-8 170MB/s ± 1% txt2
+_ZFlat8-8 189MB/s ± 1% txt3
+_ZFlat9-8 164MB/s ± 1% txt4
+_ZFlat10-8 479MB/s ± 1% pb
+_ZFlat11-8 270MB/s ± 1% gaviota
+
+
+
+For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
+are the numbers from C++ Snappy's
+
+make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
+
+BM_UFlat/0 2.4GB/s html
+BM_UFlat/1 1.4GB/s urls
+BM_UFlat/2 21.8GB/s jpg
+BM_UFlat/3 1.5GB/s jpg_200
+BM_UFlat/4 13.3GB/s pdf
+BM_UFlat/5 2.1GB/s html4
+BM_UFlat/6 1.0GB/s txt1
+BM_UFlat/7 959.4MB/s txt2
+BM_UFlat/8 1.0GB/s txt3
+BM_UFlat/9 864.5MB/s txt4
+BM_UFlat/10 2.9GB/s pb
+BM_UFlat/11 1.2GB/s gaviota
+
+BM_ZFlat/0 944.3MB/s html (22.31 %)
+BM_ZFlat/1 501.6MB/s urls (47.78 %)
+BM_ZFlat/2 14.3GB/s jpg (99.95 %)
+BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %)
+BM_ZFlat/4 8.3GB/s pdf (83.30 %)
+BM_ZFlat/5 903.5MB/s html4 (22.52 %)
+BM_ZFlat/6 336.0MB/s txt1 (57.88 %)
+BM_ZFlat/7 312.3MB/s txt2 (61.91 %)
+BM_ZFlat/8 353.1MB/s txt3 (54.99 %)
+BM_ZFlat/9 289.9MB/s txt4 (66.26 %)
+BM_ZFlat/10 1.2GB/s pb (19.68 %)
+BM_ZFlat/11 527.4MB/s gaviota (37.72 %)
diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go
new file mode 100644
index 0000000..72efb03
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode.go
@@ -0,0 +1,237 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+var (
+	// ErrCorrupt reports that the input is invalid.
+	ErrCorrupt = errors.New("snappy: corrupt input")
+	// ErrTooLarge reports that the uncompressed length is too large.
+	ErrTooLarge = errors.New("snappy: decoded block is too large")
+	// ErrUnsupported reports that the input isn't supported.
+	ErrUnsupported = errors.New("snappy: unsupported input")
+
+	// errUnsupportedLiteralLength is unexported: it is only produced by the
+	// decode kernel and surfaced through Decode.
+	errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// DecodedLen returns the length of the decoded block. It may return
+// ErrCorrupt (bad length header) or ErrTooLarge (length exceeds the
+// platform's int range).
+func DecodedLen(src []byte) (int, error) {
+	v, _, err := decodedLen(src)
+	return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied. The header is a uvarint; values above
+// 0xffffffff are corrupt per the format, and values above 0x7fffffff do not
+// fit an int on 32-bit platforms.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+	v, n := binary.Uvarint(src)
+	if n <= 0 || v > 0xffffffff {
+		return 0, 0, ErrCorrupt
+	}
+
+	// wordSize is 32 or 64, the width of uint on this platform.
+	const wordSize = 32 << (^uint(0) >> 32 & 1)
+	if wordSize == 32 && v > 0x7fffffff {
+		return 0, 0, ErrTooLarge
+	}
+	return int(v), n, nil
+}
+
+// Error codes returned by the decode kernel (asm or pure-Go); 0 means
+// success.
+const (
+	decodeErrCodeCorrupt = 1
+	decodeErrCodeUnsupportedLiteralLength = 2
+)
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+	dLen, s, err := decodedLen(src)
+	if err != nil {
+		return nil, err
+	}
+	if dLen <= len(dst) {
+		dst = dst[:dLen]
+	} else {
+		dst = make([]byte, dLen)
+	}
+	// decode is the platform kernel (assembly or pure Go); it returns one
+	// of the decodeErrCode values, 0 on success.
+	switch decode(dst, src[s:]) {
+	case 0:
+		return dst, nil
+	case decodeErrCodeUnsupportedLiteralLength:
+		return nil, errUnsupportedLiteralLength
+	}
+	return nil, ErrCorrupt
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+// The buffers are sized for the worst case: one maximum-size block and its
+// maximum encoded length plus the chunk checksum.
+func NewReader(r io.Reader) *Reader {
+	return &Reader{
+		r: r,
+		decoded: make([]byte, maxBlockSize),
+		buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
+	}
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+	r io.Reader
+	err error
+	decoded []byte
+	buf []byte
+	// decoded[i:j] contains decoded bytes that have not yet been passed on.
+	i, j int
+	// readHeader records whether the stream-identifier chunk has been seen.
+	readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one. The decoded/buf backing arrays are kept and reused.
+func (r *Reader) Reset(reader io.Reader) {
+	r.r = reader
+	r.err = nil
+	r.i = 0
+	r.j = 0
+	r.readHeader = false
+}
+
+// readFull fills p from the underlying reader, recording any failure in
+// r.err. A short read is always corruption; a clean EOF is corruption only
+// when allowEOF is false (i.e. mid-chunk).
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+	if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+		if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+			r.err = ErrCorrupt
+		}
+		return false
+	}
+	return true
+}
+
+// Read satisfies the io.Reader interface. It drains any already-decoded
+// bytes first, then reads framing-format chunks one at a time: a 4-byte
+// chunk header (type + 24-bit little-endian length), followed by the chunk
+// body. Any error is sticky in r.err.
+func (r *Reader) Read(p []byte) (int, error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	for {
+		// Serve buffered decoded bytes before touching the stream.
+		if r.i < r.j {
+			n := copy(p, r.decoded[r.i:r.j])
+			r.i += n
+			return n, nil
+		}
+		if !r.readFull(r.buf[:4], true) {
+			return 0, r.err
+		}
+		chunkType := r.buf[0]
+		// The stream must begin with a stream-identifier chunk.
+		if !r.readHeader {
+			if chunkType != chunkTypeStreamIdentifier {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.readHeader = true
+		}
+		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+		if chunkLen > len(r.buf) {
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+
+		// The chunk types are specified at
+		// https://github.com/google/snappy/blob/master/framing_format.txt
+		switch chunkType {
+		case chunkTypeCompressedData:
+			// Section 4.2. Compressed data (chunk type 0x00).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			buf := r.buf[:chunkLen]
+			if !r.readFull(buf, false) {
+				return 0, r.err
+			}
+			// First 4 bytes of the body are the little-endian CRC of the
+			// *decoded* data.
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			buf = buf[checksumSize:]
+
+			n, err := DecodedLen(buf)
+			if err != nil {
+				r.err = err
+				return 0, r.err
+			}
+			if n > len(r.decoded) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if _, err := Decode(r.decoded, buf); err != nil {
+				r.err = err
+				return 0, r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeUncompressedData:
+			// Section 4.3. Uncompressed data (chunk type 0x01).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			buf := r.buf[:checksumSize]
+			if !r.readFull(buf, false) {
+				return 0, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			// Read directly into r.decoded instead of via r.buf.
+			n := chunkLen - checksumSize
+			if n > len(r.decoded) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if !r.readFull(r.decoded[:n], false) {
+				return 0, r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeStreamIdentifier:
+			// Section 4.1. Stream identifier (chunk type 0xff).
+			if chunkLen != len(magicBody) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if !r.readFull(r.buf[:len(magicBody)], false) {
+				return 0, r.err
+			}
+			for i := 0; i < len(magicBody); i++ {
+				if r.buf[i] != magicBody[i] {
+					r.err = ErrCorrupt
+					return 0, r.err
+				}
+			}
+			continue
+		}
+
+		if chunkType <= 0x7f {
+			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+		// Section 4.4 Padding (chunk type 0xfe).
+		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+		if !r.readFull(r.buf[:chunkLen], false) {
+			return 0, r.err
+		}
+	}
+}
diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go
new file mode 100644
index 0000000..fcd192b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// decode has the same semantics as in decode_other.go.
+//
+//go:noescape
+func decode(dst, src []byte) int
diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s
new file mode 100644
index 0000000..e6179f6
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.s
@@ -0,0 +1,490 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+// - AX scratch
+// - BX scratch
+// - CX length or x
+// - DX offset
+// - SI &src[s]
+// - DI &dst[d]
+// + R8 dst_base
+// + R9 dst_len
+// + R10 dst_base + dst_len
+// + R11 src_base
+// + R12 src_len
+// + R13 src_base + src_len
+// - R14 used by doCopy
+// - R15 used by doCopy
+//
+// The registers R8-R13 (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
+// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
+TEXT ·decode(SB), NOSPLIT, $48-56
+ // Initialize SI, DI and R8-R13.
+ MOVQ dst_base+0(FP), R8
+ MOVQ dst_len+8(FP), R9
+ MOVQ R8, DI
+ MOVQ R8, R10
+ ADDQ R9, R10
+ MOVQ src_base+24(FP), R11
+ MOVQ src_len+32(FP), R12
+ MOVQ R11, SI
+ MOVQ R11, R13
+ ADDQ R12, R13
+
+loop:
+ // for s < len(src)
+ CMPQ SI, R13
+ JEQ end
+
+ // CX = uint32(src[s])
+ //
+ // switch src[s] & 0x03
+ MOVBLZX (SI), CX
+ MOVL CX, BX
+ ANDL $3, BX
+ CMPL BX, $1
+ JAE tagCopy
+
+ // ----------------------------------------
+ // The code below handles literal tags.
+
+ // case tagLiteral:
+ // x := uint32(src[s] >> 2)
+ // switch
+ SHRL $2, CX
+ CMPL CX, $60
+ JAE tagLit60Plus
+
+ // case x < 60:
+ // s++
+ INCQ SI
+
+doLit:
+ // This is the end of the inner "switch", when we have a literal tag.
+ //
+ // We assume that CX == x and x fits in a uint32, where x is the variable
+ // used in the pure Go decode_other.go code.
+
+ // length = int(x) + 1
+ //
+ // Unlike the pure Go code, we don't need to check if length <= 0 because
+ // CX can hold 64 bits, so the increment cannot overflow.
+ INCQ CX
+
+ // Prepare to check if copying length bytes will run past the end of dst or
+ // src.
+ //
+ // AX = len(dst) - d
+ // BX = len(src) - s
+ MOVQ R10, AX
+ SUBQ DI, AX
+ MOVQ R13, BX
+ SUBQ SI, BX
+
+ // !!! Try a faster technique for short (16 or fewer bytes) copies.
+ //
+ // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+ // goto callMemmove // Fall back on calling runtime·memmove.
+ // }
+ //
+ // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+ // against 21 instead of 16, because it cannot assume that all of its input
+ // is contiguous in memory and so it needs to leave enough source bytes to
+ // read the next tag without refilling buffers, but Go's Decode assumes
+ // contiguousness (the src argument is a []byte).
+ CMPQ CX, $16
+ JGT callMemmove
+ CMPQ AX, $16
+ JLT callMemmove
+ CMPQ BX, $16
+ JLT callMemmove
+
+ // !!! Implement the copy from src to dst as a 16-byte load and store.
+ // (Decode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only length bytes, but that's
+ // OK. If the input is a valid Snappy encoding then subsequent iterations
+ // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+ // non-nil error), so the overrun will be ignored.
+ //
+ // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ MOVOU 0(SI), X0
+ MOVOU X0, 0(DI)
+
+ // d += length
+ // s += length
+ ADDQ CX, DI
+ ADDQ CX, SI
+ JMP loop
+
+callMemmove:
+ // if length > len(dst)-d || length > len(src)-s { etc }
+ CMPQ CX, AX
+ JGT errCorrupt
+ CMPQ CX, BX
+ JGT errCorrupt
+
+ // copy(dst[d:], src[s:s+length])
+ //
+ // This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+ // DI, SI and CX as arguments. Coincidentally, we also need to spill those
+ // three registers to the stack, to save local variables across the CALL.
+ MOVQ DI, 0(SP)
+ MOVQ SI, 8(SP)
+ MOVQ CX, 16(SP)
+ MOVQ DI, 24(SP)
+ MOVQ SI, 32(SP)
+ MOVQ CX, 40(SP)
+ CALL runtime·memmove(SB)
+
+ // Restore local variables: unspill registers from the stack and
+ // re-calculate R8-R13.
+ MOVQ 24(SP), DI
+ MOVQ 32(SP), SI
+ MOVQ 40(SP), CX
+ MOVQ dst_base+0(FP), R8
+ MOVQ dst_len+8(FP), R9
+ MOVQ R8, R10
+ ADDQ R9, R10
+ MOVQ src_base+24(FP), R11
+ MOVQ src_len+32(FP), R12
+ MOVQ R11, R13
+ ADDQ R12, R13
+
+ // d += length
+ // s += length
+ ADDQ CX, DI
+ ADDQ CX, SI
+ JMP loop
+
+tagLit60Plus:
+ // !!! This fragment does the
+ //
+ // s += x - 58; if uint(s) > uint(len(src)) { etc }
+ //
+ // checks. In the asm version, we code it once instead of once per switch case.
+ ADDQ CX, SI
+ SUBQ $58, SI
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // case x == 60:
+ CMPL CX, $61
+ JEQ tagLit61
+ JA tagLit62Plus
+
+ // x = uint32(src[s-1])
+ MOVBLZX -1(SI), CX
+ JMP doLit
+
+tagLit61:
+ // case x == 61:
+ // x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ MOVWLZX -2(SI), CX
+ JMP doLit
+
+tagLit62Plus:
+ CMPL CX, $62
+ JA tagLit63
+
+ // case x == 62:
+ // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ MOVWLZX -3(SI), CX
+ MOVBLZX -1(SI), BX
+ SHLL $16, BX
+ ORL BX, CX
+ JMP doLit
+
+tagLit63:
+ // case x == 63:
+ // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ MOVL -4(SI), CX
+ JMP doLit
+
+// The code above handles literal tags.
+// ----------------------------------------
+// The code below handles copy tags.
+
+tagCopy4:
+ // case tagCopy4:
+ // s += 5
+ ADDQ $5, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // length = 1 + int(src[s-5])>>2
+ SHRQ $2, CX
+ INCQ CX
+
+ // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ MOVLQZX -4(SI), DX
+ JMP doCopy
+
+tagCopy2:
+ // case tagCopy2:
+ // s += 3
+ ADDQ $3, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // length = 1 + int(src[s-3])>>2
+ SHRQ $2, CX
+ INCQ CX
+
+ // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+ MOVWQZX -2(SI), DX
+ JMP doCopy
+
+tagCopy:
+ // We have a copy tag. We assume that:
+ // - BX == src[s] & 0x03
+ // - CX == src[s]
+ CMPQ BX, $2
+ JEQ tagCopy2
+ JA tagCopy4
+
+ // case tagCopy1:
+ // s += 2
+ ADDQ $2, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ MOVQ CX, DX
+ ANDQ $0xe0, DX
+ SHLQ $3, DX
+ MOVBQZX -1(SI), BX
+ ORQ BX, DX
+
+ // length = 4 + int(src[s-2])>>2&0x7
+ SHRQ $2, CX
+ ANDQ $7, CX
+ ADDQ $4, CX
+
+doCopy:
+ // This is the end of the outer "switch", when we have a copy tag.
+ //
+ // We assume that:
+ // - CX == length && CX > 0
+ // - DX == offset
+
+ // if offset <= 0 { etc }
+ CMPQ DX, $0
+ JLE errCorrupt
+
+ // if d < offset { etc }
+ MOVQ DI, BX
+ SUBQ R8, BX
+ CMPQ BX, DX
+ JLT errCorrupt
+
+ // if length > len(dst)-d { etc }
+ MOVQ R10, BX
+ SUBQ DI, BX
+ CMPQ CX, BX
+ JGT errCorrupt
+
+ // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+ //
+ // Set:
+ // - R14 = len(dst)-d
+ // - R15 = &dst[d-offset]
+ MOVQ R10, R14
+ SUBQ DI, R14
+ MOVQ DI, R15
+ SUBQ DX, R15
+
+ // !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+ //
+ // First, try using two 8-byte load/stores, similar to the doLit technique
+ // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+ // still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+ // and not one 16-byte load/store, and the first store has to be before the
+ // second load, due to the overlap if offset is in the range [8, 16).
+ //
+ // if length > 16 || offset < 8 || len(dst)-d < 16 {
+ // goto slowForwardCopy
+ // }
+ // copy 16 bytes
+ // d += length
+ CMPQ CX, $16
+ JGT slowForwardCopy
+ CMPQ DX, $8
+ JLT slowForwardCopy
+ CMPQ R14, $16
+ JLT slowForwardCopy
+ MOVQ 0(R15), AX
+ MOVQ AX, 0(DI)
+ MOVQ 8(R15), BX
+ MOVQ BX, 8(DI)
+ ADDQ CX, DI
+ JMP loop
+
+slowForwardCopy:
+ // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+ // can still try 8-byte load stores, provided we can overrun up to 10 extra
+ // bytes. As above, the overrun will be fixed up by subsequent iterations
+ // of the outermost loop.
+ //
+ // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+ // commentary says:
+ //
+ // ----
+ //
+ // The main part of this loop is a simple copy of eight bytes at a time
+ // until we've copied (at least) the requested amount of bytes. However,
+ // if d and d-offset are less than eight bytes apart (indicating a
+ // repeating pattern of length < 8), we first need to expand the pattern in
+ // order to get the correct results. For instance, if the buffer looks like
+ // this, with the eight-byte and patterns marked as
+ // intervals:
+ //
+ // abxxxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+ // a single eight-byte copy from to will repeat the pattern
+ // once, after which we can move two bytes without moving :
+ //
+ // ababxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+ // and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // offset == 1 and length == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+ //
+ // ----
+ //
+ // That "10 byte overrun" worst case is confirmed by Go's
+ // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+ // and finishSlowForwardCopy algorithm.
+ //
+ // if length > len(dst)-d-10 {
+ // goto verySlowForwardCopy
+ // }
+ SUBQ $10, R14
+ CMPQ CX, R14
+ JGT verySlowForwardCopy
+
+makeOffsetAtLeast8:
+ // !!! As above, expand the pattern so that offset >= 8 and we can use
+ // 8-byte load/stores.
+ //
+ // for offset < 8 {
+ // copy 8 bytes from dst[d-offset:] to dst[d:]
+ // length -= offset
+ // d += offset
+ // offset += offset
+ // // The two previous lines together means that d-offset, and therefore
+ // // R15, is unchanged.
+ // }
+ CMPQ DX, $8
+ JGE fixUpSlowForwardCopy
+ MOVQ (R15), BX
+ MOVQ BX, (DI)
+ SUBQ DX, CX
+ ADDQ DX, DI
+ ADDQ DX, DX
+ JMP makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+ // !!! Add length (which might be negative now) to d (implied by DI being
+ // &dst[d]) so that d ends up at the right place when we jump back to the
+ // top of the loop. Before we do that, though, we save DI to AX so that, if
+ // length is positive, copying the remaining length bytes will write to the
+ // right place.
+ MOVQ DI, AX
+ ADDQ CX, DI
+
+finishSlowForwardCopy:
+ // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+ // length means that we overrun, but as above, that will be fixed up by
+ // subsequent iterations of the outermost loop.
+ CMPQ CX, $0
+ JLE loop
+ MOVQ (R15), BX
+ MOVQ BX, (AX)
+ ADDQ $8, R15
+ ADDQ $8, AX
+ SUBQ $8, CX
+ JMP finishSlowForwardCopy
+
+verySlowForwardCopy:
+ // verySlowForwardCopy is a simple implementation of forward copy. In C
+ // parlance, this is a do/while loop instead of a while loop, since we know
+ // that length > 0. In Go syntax:
+ //
+ // for {
+ // dst[d] = dst[d - offset]
+ // d++
+ // length--
+ // if length == 0 {
+ // break
+ // }
+ // }
+ MOVB (R15), BX
+ MOVB BX, (DI)
+ INCQ R15
+ INCQ DI
+ DECQ CX
+ JNZ verySlowForwardCopy
+ JMP loop
+
+// The code above handles copy tags.
+// ----------------------------------------
+
+end:
+ // This is the end of the "for s < len(src)".
+ //
+ // if d != len(dst) { etc }
+ CMPQ DI, R10
+ JNE errCorrupt
+
+ // return 0
+ MOVQ $0, ret+48(FP)
+ RET
+
+errCorrupt:
+ // return decodeErrCodeCorrupt
+ MOVQ $1, ret+48(FP)
+ RET
diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go
new file mode 100644
index 0000000..8c9f204
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_other.go
@@ -0,0 +1,101 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+// decode writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func decode(dst, src []byte) int {
+ var d, s, offset, length int
+ for s < len(src) {
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint32(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-1])
+ case x == 61:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ case x == 62:
+ s += 4
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ case x == 63:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ }
+ length = int(x) + 1
+ if length <= 0 {
+ return decodeErrCodeUnsupportedLiteralLength
+ }
+ if length > len(dst)-d || length > len(src)-s {
+ return decodeErrCodeCorrupt
+ }
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 4 + int(src[s-2])>>2&0x7
+ offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+
+ case tagCopy2:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-3])>>2
+ offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+ case tagCopy4:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-5])>>2
+ offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ }
+
+ if offset <= 0 || d < offset || length > len(dst)-d {
+ return decodeErrCodeCorrupt
+ }
+ // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
+ // the built-in copy function, this byte-by-byte copy always runs
+ // forwards, even if the slices overlap. Conceptually, this is:
+ //
+ // d += forwardCopy(dst[d:d+length], dst[d-offset:])
+ for end := d + length; d != end; d++ {
+ dst[d] = dst[d-offset]
+ }
+ }
+ if d != len(dst) {
+ return decodeErrCodeCorrupt
+ }
+ return 0
+}
diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go
new file mode 100644
index 0000000..8d393e9
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode.go
@@ -0,0 +1,285 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Encode(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ for len(src) > 0 {
+ p := src
+ src = nil
+ if len(p) > maxBlockSize {
+ p, src = p[:maxBlockSize], p[maxBlockSize:]
+ }
+ if len(p) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], p)
+ } else {
+ d += encodeBlock(dst[d:], p)
+ }
+ }
+ return dst[:d]
+}
+
+// inputMargin is the minimum number of extra input bytes to keep, inside
+// encodeBlock's inner loop. On some architectures, this margin lets us
+// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
+// literals can be implemented as a single load to and store from a 16-byte
+// register. That literal's actual length can be as short as 1 byte, so this
+// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
+// the encoding loop will fix up the copy overrun, and this inputMargin ensures
+// that we don't overrun the dst and src buffers.
+const inputMargin = 16 - 1
+
+// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
+// could be encoded with a copy tag. This is the minimum with respect to the
+// algorithm used by encodeBlock, not a minimum enforced by the file format.
+//
+// The encoded output must start with at least a 1 byte literal, as there are
+// no previous bytes to copy. A minimal (1 byte) copy after that, generated
+// from an emitCopy call in encodeBlock's main loop, would require at least
+// another inputMargin bytes, for the reason above: we want any emitLiteral
+// calls inside encodeBlock's main loop to use the fast path if possible, which
+// requires being able to overrun by inputMargin bytes. Thus,
+// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
+//
+// The C++ code doesn't use this exact threshold, but it could, as discussed at
+// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
+// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
+// optimization. It should not affect the encoded form. This is tested by
+// TestSameEncodingAsCppShortCopies.
+const minNonLiteralBlockSize = 1 + 1 + inputMargin
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+func MaxEncodedLen(srcLen int) int {
+ n := uint64(srcLen)
+ if n > 0xffffffff {
+ return -1
+ }
+ // Compressed data can be defined as:
+ // compressed := item* literal*
+ // item := literal* copy
+ //
+ // The trailing literal sequence has a space blowup of at most 62/60
+ // since a literal of length 60 needs one tag byte + one extra byte
+ // for length information.
+ //
+ // Item blowup is trickier to measure. Suppose the "copy" op copies
+ // 4 bytes of data. Because of a special check in the encoding code,
+ // we produce a 4-byte copy only if the offset is < 65536. Therefore
+ // the copy op takes 3 bytes to encode, and this type of item leads
+ // to at most the 62/60 blowup for representing literals.
+ //
+ // Suppose the "copy" op copies 5 bytes of data. If the offset is big
+ // enough, it will take 5 bytes to encode the copy op. Therefore the
+ // worst case here is a one-byte literal followed by a five-byte copy.
+ // That is, 6 bytes of input turn into 7 bytes of "compressed" data.
+ //
+ // This last factor dominates the blowup, so the final estimate is:
+ n = 32 + n + n/6
+ if n > 0xffffffff {
+ return -1
+ }
+ return int(n)
+}
+
+var errClosed = errors.New("snappy: Writer is closed")
+
+// NewWriter returns a new Writer that compresses to w.
+//
+// The Writer returned does not buffer writes. There is no need to Flush or
+// Close such a Writer.
+//
+// Deprecated: the Writer returned is not suitable for many small writes, only
+// for few large writes. Use NewBufferedWriter instead, which is efficient
+// regardless of the frequency and shape of the writes, and remember to Close
+// that Writer when done.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ obuf: make([]byte, obufLen),
+ }
+}
+
+// NewBufferedWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// The Writer returned buffers writes. Users must call Close to guarantee all
+// data has been forwarded to the underlying io.Writer. They may also call
+// Flush zero or more times before calling Close.
+func NewBufferedWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ ibuf: make([]byte, 0, maxBlockSize),
+ obuf: make([]byte, obufLen),
+ }
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+ w io.Writer
+ err error
+
+ // ibuf is a buffer for the incoming (uncompressed) bytes.
+ //
+ // Its use is optional. For backwards compatibility, Writers created by the
+ // NewWriter function have ibuf == nil, do not buffer incoming bytes, and
+ // therefore do not need to be Flush'ed or Close'd.
+ ibuf []byte
+
+ // obuf is a buffer for the outgoing (compressed) bytes.
+ obuf []byte
+
+ // wroteStreamHeader is whether we have written the stream header.
+ wroteStreamHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+ w.w = writer
+ w.err = nil
+ if w.ibuf != nil {
+ w.ibuf = w.ibuf[:0]
+ }
+ w.wroteStreamHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+ if w.ibuf == nil {
+ // Do not buffer incoming bytes. This does not perform or compress well
+ // if the caller of Writer.Write writes many small slices. This
+ // behavior is therefore deprecated, but still supported for backwards
+ // compatibility with code that doesn't explicitly Flush or Close.
+ return w.write(p)
+ }
+
+ // The remainder of this method is based on bufio.Writer.Write from the
+ // standard library.
+
+ for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
+ var n int
+ if len(w.ibuf) == 0 {
+ // Large write, empty buffer.
+ // Write directly from p to avoid copy.
+ n, _ = w.write(p)
+ } else {
+ n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ w.Flush()
+ }
+ nRet += n
+ p = p[n:]
+ }
+ if w.err != nil {
+ return nRet, w.err
+ }
+ n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ nRet += n
+ return nRet, nil
+}
+
+func (w *Writer) write(p []byte) (nRet int, errRet error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+ for len(p) > 0 {
+ obufStart := len(magicChunk)
+ if !w.wroteStreamHeader {
+ w.wroteStreamHeader = true
+ copy(w.obuf, magicChunk)
+ obufStart = 0
+ }
+
+ var uncompressed []byte
+ if len(p) > maxBlockSize {
+ uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
+ } else {
+ uncompressed, p = p, nil
+ }
+ checksum := crc(uncompressed)
+
+ // Compress the buffer, discarding the result if the improvement
+ // isn't at least 12.5%.
+ compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
+ chunkType := uint8(chunkTypeCompressedData)
+ chunkLen := 4 + len(compressed)
+ obufEnd := obufHeaderLen + len(compressed)
+ if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
+ chunkType = chunkTypeUncompressedData
+ chunkLen = 4 + len(uncompressed)
+ obufEnd = obufHeaderLen
+ }
+
+ // Fill in the per-chunk header that comes before the body.
+ w.obuf[len(magicChunk)+0] = chunkType
+ w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
+ w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
+ w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
+ w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
+ w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
+ w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
+ w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
+
+ if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
+ w.err = err
+ return nRet, err
+ }
+ if chunkType == chunkTypeUncompressedData {
+ if _, err := w.w.Write(uncompressed); err != nil {
+ w.err = err
+ return nRet, err
+ }
+ }
+ nRet += len(uncompressed)
+ }
+ return nRet, nil
+}
+
+// Flush flushes the Writer to its underlying io.Writer.
+func (w *Writer) Flush() error {
+ if w.err != nil {
+ return w.err
+ }
+ if len(w.ibuf) == 0 {
+ return nil
+ }
+ w.write(w.ibuf)
+ w.ibuf = w.ibuf[:0]
+ return w.err
+}
+
+// Close calls Flush and then closes the Writer.
+func (w *Writer) Close() error {
+ w.Flush()
+ ret := w.err
+ if w.err == nil {
+ w.err = errClosed
+ }
+ return ret
+}
diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go
new file mode 100644
index 0000000..150d91b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// emitLiteral has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitLiteral(dst, lit []byte) int
+
+// emitCopy has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitCopy(dst []byte, offset, length int) int
+
+// extendMatch has the same semantics as in encode_other.go.
+//
+//go:noescape
+func extendMatch(src []byte, i, j int) int
+
+// encodeBlock has the same semantics as in encode_other.go.
+//
+//go:noescape
+func encodeBlock(dst, src []byte) (d int)
diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s
new file mode 100644
index 0000000..adfd979
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.s
@@ -0,0 +1,730 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
+// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
+// https://github.com/golang/snappy/issues/29
+//
+// As a workaround, the package was built with a known good assembler, and
+// those instructions were disassembled by "objdump -d" to yield the
+// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+// style comments, in AT&T asm syntax. Note that rsp here is a physical
+// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
+// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
+// fine on Go 1.6.
+
+// The asm code generally follows the pure Go code in encode_other.go, except
+// where marked with a "!!!".
+
+// ----------------------------------------------------------------------------
+
+// func emitLiteral(dst, lit []byte) int
+//
+// All local variables fit into registers. The register allocation:
+// - AX len(lit)
+// - BX n
+// - DX return value
+// - DI &dst[i]
+// - R10 &lit[0]
+//
+// The 24 bytes of stack space is to call runtime·memmove.
+//
+// The unusual register allocation of local variables, such as R10 for the
+// source pointer, matches the allocation used at the call site in encodeBlock,
+// which makes it easier to manually inline this function.
+TEXT ·emitLiteral(SB), NOSPLIT, $24-56
+ MOVQ dst_base+0(FP), DI
+ MOVQ lit_base+24(FP), R10
+ MOVQ lit_len+32(FP), AX
+ MOVQ AX, DX
+ MOVL AX, BX
+ SUBL $1, BX
+
+ CMPL BX, $60
+ JLT oneByte
+ CMPL BX, $256
+ JLT twoBytes
+
+threeBytes:
+ MOVB $0xf4, 0(DI)
+ MOVW BX, 1(DI)
+ ADDQ $3, DI
+ ADDQ $3, DX
+ JMP memmove
+
+twoBytes:
+ MOVB $0xf0, 0(DI)
+ MOVB BX, 1(DI)
+ ADDQ $2, DI
+ ADDQ $2, DX
+ JMP memmove
+
+oneByte:
+ SHLB $2, BX
+ MOVB BX, 0(DI)
+ ADDQ $1, DI
+ ADDQ $1, DX
+
+memmove:
+ MOVQ DX, ret+48(FP)
+
+ // copy(dst[i:], lit)
+ //
+ // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+ // DI, R10 and AX as arguments.
+ MOVQ DI, 0(SP)
+ MOVQ R10, 8(SP)
+ MOVQ AX, 16(SP)
+ CALL runtime·memmove(SB)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func emitCopy(dst []byte, offset, length int) int
+//
+// All local variables fit into registers. The register allocation:
+// - AX length
+// - SI &dst[0]
+// - DI &dst[i]
+// - R11 offset
+//
+// The unusual register allocation of local variables, such as R11 for the
+// offset, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·emitCopy(SB), NOSPLIT, $0-48
+ MOVQ dst_base+0(FP), DI
+ MOVQ DI, SI
+ MOVQ offset+24(FP), R11
+ MOVQ length+32(FP), AX
+
+loop0:
+ // for length >= 68 { etc }
+ CMPL AX, $68
+ JLT step1
+
+ // Emit a length 64 copy, encoded as 3 bytes.
+ MOVB $0xfe, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $64, AX
+ JMP loop0
+
+step1:
+ // if length > 64 { etc }
+ CMPL AX, $64
+ JLE step2
+
+ // Emit a length 60 copy, encoded as 3 bytes.
+ MOVB $0xee, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $60, AX
+
+step2:
+ // if length >= 12 || offset >= 2048 { goto step3 }
+ CMPL AX, $12
+ JGE step3
+ CMPL R11, $2048
+ JGE step3
+
+ // Emit the remaining copy, encoded as 2 bytes.
+ MOVB R11, 1(DI)
+ SHRL $8, R11
+ SHLB $5, R11
+ SUBB $4, AX
+ SHLB $2, AX
+ ORB AX, R11
+ ORB $1, R11
+ MOVB R11, 0(DI)
+ ADDQ $2, DI
+
+ // Return the number of bytes written.
+ SUBQ SI, DI
+ MOVQ DI, ret+40(FP)
+ RET
+
+step3:
+ // Emit the remaining copy, encoded as 3 bytes.
+ SUBL $1, AX
+ SHLB $2, AX
+ ORB $2, AX
+ MOVB AX, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+
+ // Return the number of bytes written.
+ SUBQ SI, DI
+ MOVQ DI, ret+40(FP)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func extendMatch(src []byte, i, j int) int
+//
+// All local variables fit into registers. The register allocation:
+// - DX &src[0]
+// - SI &src[j]
+// - R13 &src[len(src) - 8]
+// - R14 &src[len(src)]
+// - R15 &src[i]
+//
+// The unusual register allocation of local variables, such as R15 for a source
+// pointer, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·extendMatch(SB), NOSPLIT, $0-48
+ MOVQ src_base+0(FP), DX
+ MOVQ src_len+8(FP), R14
+ MOVQ i+24(FP), R15
+ MOVQ j+32(FP), SI
+ ADDQ DX, R14
+ ADDQ DX, R15
+ ADDQ DX, SI
+ MOVQ R14, R13
+ SUBQ $8, R13
+
+cmp8:
+ // As long as we are 8 or more bytes before the end of src, we can load and
+ // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+ CMPQ SI, R13
+ JA cmp1
+ MOVQ (R15), AX
+ MOVQ (SI), BX
+ CMPQ AX, BX
+ JNE bsf
+ ADDQ $8, R15
+ ADDQ $8, SI
+ JMP cmp8
+
+bsf:
+ // If those 8 bytes were not equal, XOR the two 8 byte values, and return
+ // the index of the first byte that differs. The BSF instruction finds the
+ // least significant 1 bit, the amd64 architecture is little-endian, and
+ // the shift by 3 converts a bit index to a byte index.
+ XORQ AX, BX
+ BSFQ BX, BX
+ SHRQ $3, BX
+ ADDQ BX, SI
+
+ // Convert from &src[ret] to ret.
+ SUBQ DX, SI
+ MOVQ SI, ret+40(FP)
+ RET
+
+cmp1:
+ // In src's tail, compare 1 byte at a time.
+ CMPQ SI, R14
+ JAE extendMatchEnd
+ MOVB (R15), AX
+ MOVB (SI), BX
+ CMPB AX, BX
+ JNE extendMatchEnd
+ ADDQ $1, R15
+ ADDQ $1, SI
+ JMP cmp1
+
+extendMatchEnd:
+ // Convert from &src[ret] to ret.
+ SUBQ DX, SI
+ MOVQ SI, ret+40(FP)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func encodeBlock(dst, src []byte) (d int)
+//
+// All local variables fit into registers, other than "var table". The register
+// allocation:
+// - AX . .
+// - BX . .
+// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
+// - DX 64 &src[0], tableSize
+// - SI 72 &src[s]
+// - DI 80 &dst[d]
+// - R9 88 sLimit
+// - R10 . &src[nextEmit]
+// - R11 96 prevHash, currHash, nextHash, offset
+// - R12 104 &src[base], skip
+// - R13 . &src[nextS], &src[len(src) - 8]
+// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
+// - R15 112 candidate
+//
+// The second column (56, 64, etc) is the stack offset to spill the registers
+// when calling other functions. We could pack this slightly tighter, but it's
+// simpler to have a dedicated spill map independent of the function called.
+//
+// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
+// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
+// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
+TEXT ·encodeBlock(SB), 0, $32888-56
+ MOVQ dst_base+0(FP), DI
+ MOVQ src_base+24(FP), SI
+ MOVQ src_len+32(FP), R14
+
+ // shift, tableSize := uint32(32-8), 1<<8
+ MOVQ $24, CX
+ MOVQ $256, DX
+
+calcShift:
+ // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+ // shift--
+ // }
+ CMPQ DX, $16384
+ JGE varTable
+ CMPQ DX, R14
+ JGE varTable
+ SUBQ $1, CX
+ SHLQ $1, DX
+ JMP calcShift
+
+varTable:
+ // var table [maxTableSize]uint16
+ //
+ // In the asm code, unlike the Go code, we can zero-initialize only the
+ // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
+ // writes 16 bytes, so we can do only tableSize/8 writes instead of the
+ // 2048 writes that would zero-initialize all of table's 32768 bytes.
+ SHRQ $3, DX
+ LEAQ table-32768(SP), BX
+ PXOR X0, X0
+
+memclr:
+ MOVOU X0, 0(BX)
+ ADDQ $16, BX
+ SUBQ $1, DX
+ JNZ memclr
+
+ // !!! DX = &src[0]
+ MOVQ SI, DX
+
+ // sLimit := len(src) - inputMargin
+ MOVQ R14, R9
+ SUBQ $15, R9
+
+ // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
+ // change for the rest of the function.
+ MOVQ CX, 56(SP)
+ MOVQ DX, 64(SP)
+ MOVQ R9, 88(SP)
+
+ // nextEmit := 0
+ MOVQ DX, R10
+
+ // s := 1
+ ADDQ $1, SI
+
+ // nextHash := hash(load32(src, s), shift)
+ MOVL 0(SI), R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+outer:
+ // for { etc }
+
+ // skip := 32
+ MOVQ $32, R12
+
+ // nextS := s
+ MOVQ SI, R13
+
+ // candidate := 0
+ MOVQ $0, R15
+
+inner0:
+ // for { etc }
+
+ // s := nextS
+ MOVQ R13, SI
+
+ // bytesBetweenHashLookups := skip >> 5
+ MOVQ R12, R14
+ SHRQ $5, R14
+
+ // nextS = s + bytesBetweenHashLookups
+ ADDQ R14, R13
+
+ // skip += bytesBetweenHashLookups
+ ADDQ R14, R12
+
+ // if nextS > sLimit { goto emitRemainder }
+ MOVQ R13, AX
+ SUBQ DX, AX
+ CMPQ AX, R9
+ JA emitRemainder
+
+ // candidate = int(table[nextHash])
+ // XXX: MOVWQZX table-32768(SP)(R11*2), R15
+ // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+ BYTE $0x4e
+ BYTE $0x0f
+ BYTE $0xb7
+ BYTE $0x7c
+ BYTE $0x5c
+ BYTE $0x78
+
+ // table[nextHash] = uint16(s)
+ MOVQ SI, AX
+ SUBQ DX, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // nextHash = hash(load32(src, nextS), shift)
+ MOVL 0(R13), R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // if load32(src, s) != load32(src, candidate) { continue } break
+ MOVL 0(SI), AX
+ MOVL (DX)(R15*1), BX
+ CMPL AX, BX
+ JNE inner0
+
+fourByteMatch:
+ // As per the encode_other.go code:
+ //
+ // A 4-byte match has been found. We'll later see etc.
+
+ // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
+ // on inputMargin in encode.go.
+ MOVQ SI, AX
+ SUBQ R10, AX
+ CMPQ AX, $16
+ JLE emitLiteralFastPath
+
+ // ----------------------------------------
+ // Begin inline of the emitLiteral call.
+ //
+ // d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ MOVL AX, BX
+ SUBL $1, BX
+
+ CMPL BX, $60
+ JLT inlineEmitLiteralOneByte
+ CMPL BX, $256
+ JLT inlineEmitLiteralTwoBytes
+
+inlineEmitLiteralThreeBytes:
+ MOVB $0xf4, 0(DI)
+ MOVW BX, 1(DI)
+ ADDQ $3, DI
+ JMP inlineEmitLiteralMemmove
+
+inlineEmitLiteralTwoBytes:
+ MOVB $0xf0, 0(DI)
+ MOVB BX, 1(DI)
+ ADDQ $2, DI
+ JMP inlineEmitLiteralMemmove
+
+inlineEmitLiteralOneByte:
+ SHLB $2, BX
+ MOVB BX, 0(DI)
+ ADDQ $1, DI
+
+inlineEmitLiteralMemmove:
+ // Spill local variables (registers) onto the stack; call; unspill.
+ //
+ // copy(dst[i:], lit)
+ //
+ // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+ // DI, R10 and AX as arguments.
+ MOVQ DI, 0(SP)
+ MOVQ R10, 8(SP)
+ MOVQ AX, 16(SP)
+ ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
+ MOVQ SI, 72(SP)
+ MOVQ DI, 80(SP)
+ MOVQ R15, 112(SP)
+ CALL runtime·memmove(SB)
+ MOVQ 56(SP), CX
+ MOVQ 64(SP), DX
+ MOVQ 72(SP), SI
+ MOVQ 80(SP), DI
+ MOVQ 88(SP), R9
+ MOVQ 112(SP), R15
+ JMP inner1
+
+inlineEmitLiteralEnd:
+ // End inline of the emitLiteral call.
+ // ----------------------------------------
+
+emitLiteralFastPath:
+ // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
+ MOVB AX, BX
+ SUBB $1, BX
+ SHLB $2, BX
+ MOVB BX, (DI)
+ ADDQ $1, DI
+
+ // !!! Implement the copy from lit to dst as a 16-byte load and store.
+ // (Encode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only len(lit) bytes, but that's
+ // OK. Subsequent iterations will fix up the overrun.
+ //
+ // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ MOVOU 0(R10), X0
+ MOVOU X0, 0(DI)
+ ADDQ AX, DI
+
+inner1:
+ // for { etc }
+
+ // base := s
+ MOVQ SI, R12
+
+ // !!! offset := base - candidate
+ MOVQ R12, R11
+ SUBQ R15, R11
+ SUBQ DX, R11
+
+ // ----------------------------------------
+ // Begin inline of the extendMatch call.
+ //
+ // s = extendMatch(src, candidate+4, s+4)
+
+ // !!! R14 = &src[len(src)]
+ MOVQ src_len+32(FP), R14
+ ADDQ DX, R14
+
+ // !!! R13 = &src[len(src) - 8]
+ MOVQ R14, R13
+ SUBQ $8, R13
+
+ // !!! R15 = &src[candidate + 4]
+ ADDQ $4, R15
+ ADDQ DX, R15
+
+ // !!! s += 4
+ ADDQ $4, SI
+
+inlineExtendMatchCmp8:
+ // As long as we are 8 or more bytes before the end of src, we can load and
+ // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+ CMPQ SI, R13
+ JA inlineExtendMatchCmp1
+ MOVQ (R15), AX
+ MOVQ (SI), BX
+ CMPQ AX, BX
+ JNE inlineExtendMatchBSF
+ ADDQ $8, R15
+ ADDQ $8, SI
+ JMP inlineExtendMatchCmp8
+
+inlineExtendMatchBSF:
+ // If those 8 bytes were not equal, XOR the two 8 byte values, and return
+ // the index of the first byte that differs. The BSF instruction finds the
+ // least significant 1 bit, the amd64 architecture is little-endian, and
+ // the shift by 3 converts a bit index to a byte index.
+ XORQ AX, BX
+ BSFQ BX, BX
+ SHRQ $3, BX
+ ADDQ BX, SI
+ JMP inlineExtendMatchEnd
+
+inlineExtendMatchCmp1:
+ // In src's tail, compare 1 byte at a time.
+ CMPQ SI, R14
+ JAE inlineExtendMatchEnd
+ MOVB (R15), AX
+ MOVB (SI), BX
+ CMPB AX, BX
+ JNE inlineExtendMatchEnd
+ ADDQ $1, R15
+ ADDQ $1, SI
+ JMP inlineExtendMatchCmp1
+
+inlineExtendMatchEnd:
+ // End inline of the extendMatch call.
+ // ----------------------------------------
+
+ // ----------------------------------------
+ // Begin inline of the emitCopy call.
+ //
+ // d += emitCopy(dst[d:], base-candidate, s-base)
+
+ // !!! length := s - base
+ MOVQ SI, AX
+ SUBQ R12, AX
+
+inlineEmitCopyLoop0:
+ // for length >= 68 { etc }
+ CMPL AX, $68
+ JLT inlineEmitCopyStep1
+
+ // Emit a length 64 copy, encoded as 3 bytes.
+ MOVB $0xfe, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $64, AX
+ JMP inlineEmitCopyLoop0
+
+inlineEmitCopyStep1:
+ // if length > 64 { etc }
+ CMPL AX, $64
+ JLE inlineEmitCopyStep2
+
+ // Emit a length 60 copy, encoded as 3 bytes.
+ MOVB $0xee, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $60, AX
+
+inlineEmitCopyStep2:
+ // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
+ CMPL AX, $12
+ JGE inlineEmitCopyStep3
+ CMPL R11, $2048
+ JGE inlineEmitCopyStep3
+
+ // Emit the remaining copy, encoded as 2 bytes.
+ MOVB R11, 1(DI)
+ SHRL $8, R11
+ SHLB $5, R11
+ SUBB $4, AX
+ SHLB $2, AX
+ ORB AX, R11
+ ORB $1, R11
+ MOVB R11, 0(DI)
+ ADDQ $2, DI
+ JMP inlineEmitCopyEnd
+
+inlineEmitCopyStep3:
+ // Emit the remaining copy, encoded as 3 bytes.
+ SUBL $1, AX
+ SHLB $2, AX
+ ORB $2, AX
+ MOVB AX, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+
+inlineEmitCopyEnd:
+ // End inline of the emitCopy call.
+ // ----------------------------------------
+
+ // nextEmit = s
+ MOVQ SI, R10
+
+ // if s >= sLimit { goto emitRemainder }
+ MOVQ SI, AX
+ SUBQ DX, AX
+ CMPQ AX, R9
+ JAE emitRemainder
+
+ // As per the encode_other.go code:
+ //
+ // We could immediately etc.
+
+ // x := load64(src, s-1)
+ MOVQ -1(SI), R14
+
+ // prevHash := hash(uint32(x>>0), shift)
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // table[prevHash] = uint16(s-1)
+ MOVQ SI, AX
+ SUBQ DX, AX
+ SUBQ $1, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // currHash := hash(uint32(x>>8), shift)
+ SHRQ $8, R14
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // candidate = int(table[currHash])
+ // XXX: MOVWQZX table-32768(SP)(R11*2), R15
+ // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+ BYTE $0x4e
+ BYTE $0x0f
+ BYTE $0xb7
+ BYTE $0x7c
+ BYTE $0x5c
+ BYTE $0x78
+
+ // table[currHash] = uint16(s)
+ ADDQ $1, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // if uint32(x>>8) == load32(src, candidate) { continue }
+ MOVL (DX)(R15*1), BX
+ CMPL R14, BX
+ JEQ inner1
+
+ // nextHash = hash(uint32(x>>16), shift)
+ SHRQ $8, R14
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // s++
+ ADDQ $1, SI
+
+ // break out of the inner1 for loop, i.e. continue the outer loop.
+ JMP outer
+
+emitRemainder:
+ // if nextEmit < len(src) { etc }
+ MOVQ src_len+32(FP), AX
+ ADDQ DX, AX
+ CMPQ R10, AX
+ JEQ encodeBlockEnd
+
+ // d += emitLiteral(dst[d:], src[nextEmit:])
+ //
+ // Push args.
+ MOVQ DI, 0(SP)
+ MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
+ MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
+ MOVQ R10, 24(SP)
+ SUBQ R10, AX
+ MOVQ AX, 32(SP)
+ MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
+
+ // Spill local variables (registers) onto the stack; call; unspill.
+ MOVQ DI, 80(SP)
+ CALL ·emitLiteral(SB)
+ MOVQ 80(SP), DI
+
+ // Finish the "d +=" part of "d += emitLiteral(etc)".
+ ADDQ 48(SP), DI
+
+encodeBlockEnd:
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, DI
+ MOVQ DI, d+48(FP)
+ RET
diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go
new file mode 100644
index 0000000..dbcae90
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_other.go
@@ -0,0 +1,238 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+func load32(b []byte, i int) uint32 {
+ b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+ b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+ i, n := 0, uint(len(lit)-1)
+ switch {
+ case n < 60:
+ dst[0] = uint8(n)<<2 | tagLiteral
+ i = 1
+ case n < 1<<8:
+ dst[0] = 60<<2 | tagLiteral
+ dst[1] = uint8(n)
+ i = 2
+ default:
+ dst[0] = 61<<2 | tagLiteral
+ dst[1] = uint8(n)
+ dst[2] = uint8(n >> 8)
+ i = 3
+ }
+ return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= 65535
+// 4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+ i := 0
+ // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+ // threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+ // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+ // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+ // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+ // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+ // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+ // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+ for length >= 68 {
+ // Emit a length 64 copy, encoded as 3 bytes.
+ dst[i+0] = 63<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= 64
+ }
+ if length > 64 {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ dst[i+0] = 59<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= 60
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[i+0] = uint8(length-1)<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ return i + 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ dst[i+1] = uint8(offset)
+ return i + 2
+}
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+// 0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+ for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+ }
+ return j
+}
+
+func hash(u, shift uint32) uint32 {
+ return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+ // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+ // The table element type is uint16, as s < sLimit and sLimit < len(src)
+ // and len(src) <= maxBlockSize and maxBlockSize == 65536.
+ const (
+ maxTableSize = 1 << 14
+ // tableMask is redundant, but helps the compiler eliminate bounds
+ // checks.
+ tableMask = maxTableSize - 1
+ )
+ shift := uint32(32 - 8)
+ for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+ shift--
+ }
+ // In Go, all array elements are zero-initialized, so there is no advantage
+ // to a smaller tableSize per se. However, it matches the C++ algorithm,
+ // and in the asm versions of this code, we can get away with zeroing only
+ // the first tableSize elements.
+ var table [maxTableSize]uint16
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ nextHash := hash(load32(src, s), shift)
+
+ for {
+ // Copied from the C++ snappy implementation:
+ //
+ // Heuristic match skipping: If 32 bytes are scanned with no matches
+ // found, start looking only at every other byte. If 32 more bytes are
+ // scanned (or skipped), look at every third byte, etc.. When a match
+ // is found, immediately go back to looking at every byte. This is a
+ // small loss (~5% performance, ~0.1% density) for compressible data
+ // due to more bookkeeping, but for non-compressible data (such as
+ // JPEG) it's a huge win since the compressor quickly "realizes" the
+ // data is incompressible and doesn't bother looking for matches
+ // everywhere.
+ //
+ // The "skip" variable keeps track of how many bytes there are since
+ // the last match; dividing it by 32 (ie. right-shifting by five) gives
+ // the number of bytes to move ahead for each iteration.
+ skip := 32
+
+ nextS := s
+ candidate := 0
+ for {
+ s = nextS
+ bytesBetweenHashLookups := skip >> 5
+ nextS = s + bytesBetweenHashLookups
+ skip += bytesBetweenHashLookups
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidate = int(table[nextHash&tableMask])
+ table[nextHash&tableMask] = uint16(s)
+ nextHash = hash(load32(src, nextS), shift)
+ if load32(src, s) == load32(src, candidate) {
+ break
+ }
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+
+ // Extend the 4-byte match as long as possible.
+ //
+ // This is an inlined version of:
+ // s = extendMatch(src, candidate+4, s+4)
+ s += 4
+ for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
+ }
+
+ d += emitCopy(dst[d:], base-candidate, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load64(src, s-1)
+ prevHash := hash(uint32(x>>0), shift)
+ table[prevHash&tableMask] = uint16(s - 1)
+ currHash := hash(uint32(x>>8), shift)
+ candidate = int(table[currHash&tableMask])
+ table[currHash&tableMask] = uint16(s)
+ if uint32(x>>8) != load32(src, candidate) {
+ nextHash = hash(uint32(x>>16), shift)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go
new file mode 100644
index 0000000..ece692e
--- /dev/null
+++ b/vendor/github.com/golang/snappy/snappy.go
@@ -0,0 +1,98 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the Snappy compression format. It aims for very
+// high speeds and reasonable compression.
+//
+// There are actually two Snappy formats: block and stream. They are related,
+// but different: trying to decompress block-compressed data as a Snappy stream
+// will fail, and vice versa. The block format is the Decode and Encode
+// functions and the stream format is the Reader and Writer types.
+//
+// The block format, the more common case, is used when the complete size (the
+// number of bytes) of the original data is known upfront, at the time
+// compression starts. The stream format, also known as the framing format, is
+// for when that isn't always true.
+//
+// The canonical, C++ implementation is at https://github.com/google/snappy and
+// it only implements the block format.
+package snappy // import "github.com/golang/snappy"
+
+import (
+ "hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+ - If m < 60, the next 1 + m bytes are literal bytes.
+ - Otherwise, let n be the little-endian unsigned integer denoted by the next
+ m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+ - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+ The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+ of the offset. The next byte is bits 0-7 of the offset.
+ - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+ The length is 1 + m. The offset is the little-endian unsigned integer
+ denoted by the next 2 bytes.
+ - For l == 3, this tag is a legacy format that is no longer issued by most
+ encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
+ [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+ integer denoted by the next 4 bytes.
+*/
+const (
+ tagLiteral = 0x00
+ tagCopy1 = 0x01
+ tagCopy2 = 0x02
+ tagCopy4 = 0x03
+)
+
+const (
+ checksumSize = 4
+ chunkHeaderSize = 4
+ magicChunk = "\xff\x06\x00\x00" + magicBody
+ magicBody = "sNaPpY"
+
+ // maxBlockSize is the maximum size of the input to encodeBlock. It is not
+ // part of the wire format per se, but some parts of the encoder assume
+ // that an offset fits into a uint16.
+ //
+ // Also, for the framing format (Writer type instead of Encode function),
+ // https://github.com/google/snappy/blob/master/framing_format.txt says
+ // that "the uncompressed data in a chunk must be no longer than 65536
+ // bytes".
+ maxBlockSize = 65536
+
+ // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
+ // hard coded to be a const instead of a variable, so that obufLen can also
+ // be a const. Their equivalence is confirmed by
+ // TestMaxEncodedLenOfMaxBlockSize.
+ maxEncodedLenOfMaxBlockSize = 76490
+
+ obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+ obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+ chunkTypeCompressedData = 0x00
+ chunkTypeUncompressedData = 0x01
+ chunkTypePadding = 0xfe
+ chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+ c := crc32.Update(0, crcTable, b)
+ return uint32(c>>15|c<<17) + 0xa282ead8
+}
diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore
new file mode 100644
index 0000000..cd3fcd1
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+.idea/
+*.iml
diff --git a/vendor/github.com/gorilla/websocket/.travis.yml b/vendor/github.com/gorilla/websocket/.travis.yml
new file mode 100644
index 0000000..a49db51
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/.travis.yml
@@ -0,0 +1,19 @@
+language: go
+sudo: false
+
+matrix:
+ include:
+ - go: 1.7.x
+ - go: 1.8.x
+ - go: 1.9.x
+ - go: 1.10.x
+ - go: 1.11.x
+ - go: tip
+ allow_failures:
+ - go: tip
+
+script:
+ - go get -t -v ./...
+ - diff -u <(echo -n) <(gofmt -d .)
+ - go vet $(go list ./... | grep -v /vendor/)
+ - go test -v -race ./...
diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS
new file mode 100644
index 0000000..1931f40
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/AUTHORS
@@ -0,0 +1,9 @@
+# This is the official list of Gorilla WebSocket authors for copyright
+# purposes.
+#
+# Please keep the list sorted.
+
+Gary Burd
+Google LLC (https://opensource.google.com/)
+Joachim Bauch
+
diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE
new file mode 100644
index 0000000..9171c97
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md
new file mode 100644
index 0000000..20e391f
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/README.md
@@ -0,0 +1,64 @@
+# Gorilla WebSocket
+
+Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
+[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
+
+[![Build Status](https://travis-ci.org/gorilla/websocket.svg?branch=master)](https://travis-ci.org/gorilla/websocket)
+[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket)
+
+### Documentation
+
+* [API Reference](http://godoc.org/github.com/gorilla/websocket)
+* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
+* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
+* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
+* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)
+
+### Status
+
+The Gorilla WebSocket package provides a complete and tested implementation of
+the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
+package API is stable.
+
+### Installation
+
+ go get github.com/gorilla/websocket
+
+### Protocol Compliance
+
+The Gorilla WebSocket package passes the server tests in the [Autobahn Test
+Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn
+subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
+
+### Gorilla WebSocket compared with other packages
+
+
+
+Notes:
+
+1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
+2. The application can get the type of a received data message by implementing
+ a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal)
+ function.
+3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries.
+ Read returns when the input buffer is full or a frame boundary is
+ encountered. Each call to Write sends a single frame message. The Gorilla
+ io.Reader and io.WriteCloser operate on a single WebSocket message.
+
diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go
new file mode 100644
index 0000000..2e32fd5
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/client.go
@@ -0,0 +1,395 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "errors"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/http/httptrace"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// ErrBadHandshake is returned when the server response to opening handshake is
+// invalid.
+var ErrBadHandshake = errors.New("websocket: bad handshake")
+
+var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
+
+// NewClient creates a new client connection using the given net connection.
+// The URL u specifies the host and request URI. Use requestHeader to specify
+// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
+// (Cookie). Use the response.Header to get the selected subprotocol
+// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
+//
+// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
+// non-nil *http.Response so that callers can handle redirects, authentication,
+// etc.
+//
+// Deprecated: Use Dialer instead.
+func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
+ d := Dialer{
+ ReadBufferSize: readBufSize,
+ WriteBufferSize: writeBufSize,
+ NetDial: func(net, addr string) (net.Conn, error) {
+ return netConn, nil
+ },
+ }
+ return d.Dial(u.String(), requestHeader)
+}
+
+// A Dialer contains options for connecting to WebSocket server.
+type Dialer struct {
+ // NetDial specifies the dial function for creating TCP connections. If
+ // NetDial is nil, net.Dial is used.
+ NetDial func(network, addr string) (net.Conn, error)
+
+ // NetDialContext specifies the dial function for creating TCP connections. If
+ // NetDialContext is nil, net.DialContext is used.
+ NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ // Proxy specifies a function to return a proxy for a given
+ // Request. If the function returns a non-nil error, the
+ // request is aborted with the provided error.
+ // If Proxy is nil or returns a nil *URL, no proxy is used.
+ Proxy func(*http.Request) (*url.URL, error)
+
+ // TLSClientConfig specifies the TLS configuration to use with tls.Client.
+ // If nil, the default configuration is used.
+ TLSClientConfig *tls.Config
+
+ // HandshakeTimeout specifies the duration for the handshake to complete.
+ HandshakeTimeout time.Duration
+
+ // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer
+ // size is zero, then a useful default size is used. The I/O buffer sizes
+ // do not limit the size of the messages that can be sent or received.
+ ReadBufferSize, WriteBufferSize int
+
+ // WriteBufferPool is a pool of buffers for write operations. If the value
+ // is not set, then write buffers are allocated to the connection for the
+ // lifetime of the connection.
+ //
+ // A pool is most useful when the application has a modest volume of writes
+ // across a large number of connections.
+ //
+ // Applications should use a single pool for each unique value of
+ // WriteBufferSize.
+ WriteBufferPool BufferPool
+
+ // Subprotocols specifies the client's requested subprotocols.
+ Subprotocols []string
+
+ // EnableCompression specifies if the client should attempt to negotiate
+ // per message compression (RFC 7692). Setting this value to true does not
+ // guarantee that compression will be supported. Currently only "no context
+ // takeover" modes are supported.
+ EnableCompression bool
+
+ // Jar specifies the cookie jar.
+ // If Jar is nil, cookies are not sent in requests and ignored
+ // in responses.
+ Jar http.CookieJar
+}
+
+// Dial creates a new client connection by calling DialContext with a background context.
+func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
+ return d.DialContext(context.Background(), urlStr, requestHeader)
+}
+
+var errMalformedURL = errors.New("malformed ws or wss URL")
+
+func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
+ hostPort = u.Host
+ hostNoPort = u.Host
+ if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
+ hostNoPort = hostNoPort[:i]
+ } else {
+ switch u.Scheme {
+ case "wss":
+ hostPort += ":443"
+ case "https":
+ hostPort += ":443"
+ default:
+ hostPort += ":80"
+ }
+ }
+ return hostPort, hostNoPort
+}
+
+// DefaultDialer is a dialer with all fields set to the default values.
+var DefaultDialer = &Dialer{
+ Proxy: http.ProxyFromEnvironment,
+ HandshakeTimeout: 45 * time.Second,
+}
+
+// nilDialer is dialer to use when receiver is nil.
+var nilDialer = *DefaultDialer
+
+// DialContext creates a new client connection. Use requestHeader to specify the
+// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
+// Use the response.Header to get the selected subprotocol
+// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
+//
+// The context will be used in the request and in the Dialer
+//
+// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
+// non-nil *http.Response so that callers can handle redirects, authentication,
+// etcetera. The response body may not contain the entire response and does not
+// need to be closed by the application.
+func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
+ if d == nil {
+ d = &nilDialer
+ }
+
+ challengeKey, err := generateChallengeKey()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ u, err := url.Parse(urlStr)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ switch u.Scheme {
+ case "ws":
+ u.Scheme = "http"
+ case "wss":
+ u.Scheme = "https"
+ default:
+ return nil, nil, errMalformedURL
+ }
+
+ if u.User != nil {
+ // User name and password are not allowed in websocket URIs.
+ return nil, nil, errMalformedURL
+ }
+
+ req := &http.Request{
+ Method: "GET",
+ URL: u,
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Header: make(http.Header),
+ Host: u.Host,
+ }
+ req = req.WithContext(ctx)
+
+ // Set the cookies present in the cookie jar of the dialer
+ if d.Jar != nil {
+ for _, cookie := range d.Jar.Cookies(u) {
+ req.AddCookie(cookie)
+ }
+ }
+
+ // Set the request headers using the capitalization for names and values in
+ // RFC examples. Although the capitalization shouldn't matter, there are
+ // servers that depend on it. The Header.Set method is not used because the
+ // method canonicalizes the header names.
+ req.Header["Upgrade"] = []string{"websocket"}
+ req.Header["Connection"] = []string{"Upgrade"}
+ req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
+ req.Header["Sec-WebSocket-Version"] = []string{"13"}
+ if len(d.Subprotocols) > 0 {
+ req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
+ }
+ for k, vs := range requestHeader {
+ switch {
+ case k == "Host":
+ if len(vs) > 0 {
+ req.Host = vs[0]
+ }
+ case k == "Upgrade" ||
+ k == "Connection" ||
+ k == "Sec-Websocket-Key" ||
+ k == "Sec-Websocket-Version" ||
+ k == "Sec-Websocket-Extensions" ||
+ (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
+ return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
+ case k == "Sec-Websocket-Protocol":
+ req.Header["Sec-WebSocket-Protocol"] = vs
+ default:
+ req.Header[k] = vs
+ }
+ }
+
+ if d.EnableCompression {
+ req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"}
+ }
+
+ if d.HandshakeTimeout != 0 {
+ var cancel func()
+ ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout)
+ defer cancel()
+ }
+
+ // Get network dial function.
+ var netDial func(network, add string) (net.Conn, error)
+
+ if d.NetDialContext != nil {
+ netDial = func(network, addr string) (net.Conn, error) {
+ return d.NetDialContext(ctx, network, addr)
+ }
+ } else if d.NetDial != nil {
+ netDial = d.NetDial
+ } else {
+ netDialer := &net.Dialer{}
+ netDial = func(network, addr string) (net.Conn, error) {
+ return netDialer.DialContext(ctx, network, addr)
+ }
+ }
+
+ // If needed, wrap the dial function to set the connection deadline.
+ if deadline, ok := ctx.Deadline(); ok {
+ forwardDial := netDial
+ netDial = func(network, addr string) (net.Conn, error) {
+ c, err := forwardDial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+ err = c.SetDeadline(deadline)
+ if err != nil {
+ c.Close()
+ return nil, err
+ }
+ return c, nil
+ }
+ }
+
+ // If needed, wrap the dial function to connect through a proxy.
+ if d.Proxy != nil {
+ proxyURL, err := d.Proxy(req)
+ if err != nil {
+ return nil, nil, err
+ }
+ if proxyURL != nil {
+ dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
+ if err != nil {
+ return nil, nil, err
+ }
+ netDial = dialer.Dial
+ }
+ }
+
+ hostPort, hostNoPort := hostPortNoPort(u)
+ trace := httptrace.ContextClientTrace(ctx)
+ if trace != nil && trace.GetConn != nil {
+ trace.GetConn(hostPort)
+ }
+
+ netConn, err := netDial("tcp", hostPort)
+ if trace != nil && trace.GotConn != nil {
+ trace.GotConn(httptrace.GotConnInfo{
+ Conn: netConn,
+ })
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+
+ defer func() {
+ if netConn != nil {
+ netConn.Close()
+ }
+ }()
+
+ if u.Scheme == "https" {
+ cfg := cloneTLSConfig(d.TLSClientConfig)
+ if cfg.ServerName == "" {
+ cfg.ServerName = hostNoPort
+ }
+ tlsConn := tls.Client(netConn, cfg)
+ netConn = tlsConn
+
+ var err error
+ if trace != nil {
+ err = doHandshakeWithTrace(trace, tlsConn, cfg)
+ } else {
+ err = doHandshake(tlsConn, cfg)
+ }
+
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil)
+
+ if err := req.Write(netConn); err != nil {
+ return nil, nil, err
+ }
+
+ if trace != nil && trace.GotFirstResponseByte != nil {
+ if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 {
+ trace.GotFirstResponseByte()
+ }
+ }
+
+ resp, err := http.ReadResponse(conn.br, req)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if d.Jar != nil {
+ if rc := resp.Cookies(); len(rc) > 0 {
+ d.Jar.SetCookies(u, rc)
+ }
+ }
+
+ if resp.StatusCode != 101 ||
+ !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
+ !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
+ resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
+ // Before closing the network connection on return from this
+ // function, slurp up some of the response to aid application
+ // debugging.
+ buf := make([]byte, 1024)
+ n, _ := io.ReadFull(resp.Body, buf)
+ resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
+ return nil, resp, ErrBadHandshake
+ }
+
+ for _, ext := range parseExtensions(resp.Header) {
+ if ext[""] != "permessage-deflate" {
+ continue
+ }
+ _, snct := ext["server_no_context_takeover"]
+ _, cnct := ext["client_no_context_takeover"]
+ if !snct || !cnct {
+ return nil, resp, errInvalidCompression
+ }
+ conn.newCompressionWriter = compressNoContextTakeover
+ conn.newDecompressionReader = decompressNoContextTakeover
+ break
+ }
+
+ resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
+ conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
+
+ netConn.SetDeadline(time.Time{})
+ netConn = nil // to avoid close in defer.
+ return conn, resp, nil
+}
+
+func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error {
+ if err := tlsConn.Handshake(); err != nil {
+ return err
+ }
+ if !cfg.InsecureSkipVerify {
+ if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/gorilla/websocket/client_clone.go b/vendor/github.com/gorilla/websocket/client_clone.go
new file mode 100644
index 0000000..4f0d943
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/client_clone.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package websocket
+
+import "crypto/tls"
+
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ if cfg == nil {
+ return &tls.Config{}
+ }
+ return cfg.Clone()
+}
diff --git a/vendor/github.com/gorilla/websocket/client_clone_legacy.go b/vendor/github.com/gorilla/websocket/client_clone_legacy.go
new file mode 100644
index 0000000..babb007
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/client_clone_legacy.go
@@ -0,0 +1,38 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.8
+
+package websocket
+
+import "crypto/tls"
+
+// cloneTLSConfig clones all public fields except the fields
+// SessionTicketsDisabled and SessionTicketKey. This avoids copying the
+// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a
+// config in active use.
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ if cfg == nil {
+ return &tls.Config{}
+ }
+ return &tls.Config{
+ Rand: cfg.Rand,
+ Time: cfg.Time,
+ Certificates: cfg.Certificates,
+ NameToCertificate: cfg.NameToCertificate,
+ GetCertificate: cfg.GetCertificate,
+ RootCAs: cfg.RootCAs,
+ NextProtos: cfg.NextProtos,
+ ServerName: cfg.ServerName,
+ ClientAuth: cfg.ClientAuth,
+ ClientCAs: cfg.ClientCAs,
+ InsecureSkipVerify: cfg.InsecureSkipVerify,
+ CipherSuites: cfg.CipherSuites,
+ PreferServerCipherSuites: cfg.PreferServerCipherSuites,
+ ClientSessionCache: cfg.ClientSessionCache,
+ MinVersion: cfg.MinVersion,
+ MaxVersion: cfg.MaxVersion,
+ CurvePreferences: cfg.CurvePreferences,
+ }
+}
diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go
new file mode 100644
index 0000000..813ffb1
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/compression.go
@@ -0,0 +1,148 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "compress/flate"
+ "errors"
+ "io"
+ "strings"
+ "sync"
+)
+
+const (
+ minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6
+ maxCompressionLevel = flate.BestCompression
+ defaultCompressionLevel = 1
+)
+
+var (
+ flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
+ flateReaderPool = sync.Pool{New: func() interface{} {
+ return flate.NewReader(nil)
+ }}
+)
+
+func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
+ const tail =
+ // Add four bytes as specified in RFC
+ "\x00\x00\xff\xff" +
+ // Add final block to squelch unexpected EOF error from flate reader.
+ "\x01\x00\x00\xff\xff"
+
+ fr, _ := flateReaderPool.Get().(io.ReadCloser)
+ fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
+ return &flateReadWrapper{fr}
+}
+
+func isValidCompressionLevel(level int) bool {
+ return minCompressionLevel <= level && level <= maxCompressionLevel
+}
+
+func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
+ p := &flateWriterPools[level-minCompressionLevel]
+ tw := &truncWriter{w: w}
+ fw, _ := p.Get().(*flate.Writer)
+ if fw == nil {
+ fw, _ = flate.NewWriter(tw, level)
+ } else {
+ fw.Reset(tw)
+ }
+ return &flateWriteWrapper{fw: fw, tw: tw, p: p}
+}
+
+// truncWriter is an io.Writer that writes all but the last four bytes of the
+// stream to another io.Writer.
+type truncWriter struct {
+ w io.WriteCloser
+ n int
+ p [4]byte
+}
+
+func (w *truncWriter) Write(p []byte) (int, error) {
+ n := 0
+
+ // fill buffer first for simplicity.
+ if w.n < len(w.p) {
+ n = copy(w.p[w.n:], p)
+ p = p[n:]
+ w.n += n
+ if len(p) == 0 {
+ return n, nil
+ }
+ }
+
+ m := len(p)
+ if m > len(w.p) {
+ m = len(w.p)
+ }
+
+ if nn, err := w.w.Write(w.p[:m]); err != nil {
+ return n + nn, err
+ }
+
+ copy(w.p[:], w.p[m:])
+ copy(w.p[len(w.p)-m:], p[len(p)-m:])
+ nn, err := w.w.Write(p[:len(p)-m])
+ return n + nn, err
+}
+
+type flateWriteWrapper struct {
+ fw *flate.Writer
+ tw *truncWriter
+ p *sync.Pool
+}
+
+func (w *flateWriteWrapper) Write(p []byte) (int, error) {
+ if w.fw == nil {
+ return 0, errWriteClosed
+ }
+ return w.fw.Write(p)
+}
+
+func (w *flateWriteWrapper) Close() error {
+ if w.fw == nil {
+ return errWriteClosed
+ }
+ err1 := w.fw.Flush()
+ w.p.Put(w.fw)
+ w.fw = nil
+ if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
+ return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
+ }
+ err2 := w.tw.w.Close()
+ if err1 != nil {
+ return err1
+ }
+ return err2
+}
+
+type flateReadWrapper struct {
+ fr io.ReadCloser
+}
+
+func (r *flateReadWrapper) Read(p []byte) (int, error) {
+ if r.fr == nil {
+ return 0, io.ErrClosedPipe
+ }
+ n, err := r.fr.Read(p)
+ if err == io.EOF {
+ // Preemptively place the reader back in the pool. This helps with
+ // scenarios where the application does not call NextReader() soon after
+ // this final read.
+ r.Close()
+ }
+ return n, err
+}
+
+func (r *flateReadWrapper) Close() error {
+ if r.fr == nil {
+ return io.ErrClosedPipe
+ }
+ err := r.fr.Close()
+ flateReaderPool.Put(r.fr)
+ r.fr = nil
+ return err
+}
diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go
new file mode 100644
index 0000000..d2a21c1
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/conn.go
@@ -0,0 +1,1165 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "encoding/binary"
+ "errors"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "strconv"
+ "sync"
+ "time"
+ "unicode/utf8"
+)
+
+const (
+ // Frame header byte 0 bits from Section 5.2 of RFC 6455
+ finalBit = 1 << 7
+ rsv1Bit = 1 << 6
+ rsv2Bit = 1 << 5
+ rsv3Bit = 1 << 4
+
+ // Frame header byte 1 bits from Section 5.2 of RFC 6455
+ maskBit = 1 << 7
+
+ maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask
+ maxControlFramePayloadSize = 125
+
+ writeWait = time.Second
+
+ defaultReadBufferSize = 4096
+ defaultWriteBufferSize = 4096
+
+ continuationFrame = 0
+ noFrame = -1
+)
+
+// Close codes defined in RFC 6455, section 11.7.
+const (
+ CloseNormalClosure = 1000
+ CloseGoingAway = 1001
+ CloseProtocolError = 1002
+ CloseUnsupportedData = 1003
+ CloseNoStatusReceived = 1005
+ CloseAbnormalClosure = 1006
+ CloseInvalidFramePayloadData = 1007
+ ClosePolicyViolation = 1008
+ CloseMessageTooBig = 1009
+ CloseMandatoryExtension = 1010
+ CloseInternalServerErr = 1011
+ CloseServiceRestart = 1012
+ CloseTryAgainLater = 1013
+ CloseTLSHandshake = 1015
+)
+
+// The message types are defined in RFC 6455, section 11.8.
+const (
+ // TextMessage denotes a text data message. The text message payload is
+ // interpreted as UTF-8 encoded text data.
+ TextMessage = 1
+
+ // BinaryMessage denotes a binary data message.
+ BinaryMessage = 2
+
+ // CloseMessage denotes a close control message. The optional message
+ // payload contains a numeric code and text. Use the FormatCloseMessage
+ // function to format a close message payload.
+ CloseMessage = 8
+
+ // PingMessage denotes a ping control message. The optional message payload
+ // is UTF-8 encoded text.
+ PingMessage = 9
+
+ // PongMessage denotes a pong control message. The optional message payload
+ // is UTF-8 encoded text.
+ PongMessage = 10
+)
+
+// ErrCloseSent is returned when the application writes a message to the
+// connection after sending a close message.
+var ErrCloseSent = errors.New("websocket: close sent")
+
+// ErrReadLimit is returned when reading a message that is larger than the
+// read limit set for the connection.
+var ErrReadLimit = errors.New("websocket: read limit exceeded")
+
+// netError satisfies the net Error interface.
+type netError struct {
+ msg string
+ temporary bool
+ timeout bool
+}
+
+func (e *netError) Error() string { return e.msg }
+func (e *netError) Temporary() bool { return e.temporary }
+func (e *netError) Timeout() bool { return e.timeout }
+
+// CloseError represents a close message.
+type CloseError struct {
+ // Code is defined in RFC 6455, section 11.7.
+ Code int
+
+ // Text is the optional text payload.
+ Text string
+}
+
+func (e *CloseError) Error() string {
+ s := []byte("websocket: close ")
+ s = strconv.AppendInt(s, int64(e.Code), 10)
+ switch e.Code {
+ case CloseNormalClosure:
+ s = append(s, " (normal)"...)
+ case CloseGoingAway:
+ s = append(s, " (going away)"...)
+ case CloseProtocolError:
+ s = append(s, " (protocol error)"...)
+ case CloseUnsupportedData:
+ s = append(s, " (unsupported data)"...)
+ case CloseNoStatusReceived:
+ s = append(s, " (no status)"...)
+ case CloseAbnormalClosure:
+ s = append(s, " (abnormal closure)"...)
+ case CloseInvalidFramePayloadData:
+ s = append(s, " (invalid payload data)"...)
+ case ClosePolicyViolation:
+ s = append(s, " (policy violation)"...)
+ case CloseMessageTooBig:
+ s = append(s, " (message too big)"...)
+ case CloseMandatoryExtension:
+ s = append(s, " (mandatory extension missing)"...)
+ case CloseInternalServerErr:
+ s = append(s, " (internal server error)"...)
+ case CloseTLSHandshake:
+ s = append(s, " (TLS handshake error)"...)
+ }
+ if e.Text != "" {
+ s = append(s, ": "...)
+ s = append(s, e.Text...)
+ }
+ return string(s)
+}
+
+// IsCloseError returns boolean indicating whether the error is a *CloseError
+// with one of the specified codes.
+func IsCloseError(err error, codes ...int) bool {
+ if e, ok := err.(*CloseError); ok {
+ for _, code := range codes {
+ if e.Code == code {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// IsUnexpectedCloseError returns boolean indicating whether the error is a
+// *CloseError with a code not in the list of expected codes.
+func IsUnexpectedCloseError(err error, expectedCodes ...int) bool {
+ if e, ok := err.(*CloseError); ok {
+ for _, code := range expectedCodes {
+ if e.Code == code {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+var (
+ errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true}
+ errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()}
+ errBadWriteOpCode = errors.New("websocket: bad write message type")
+ errWriteClosed = errors.New("websocket: write closed")
+ errInvalidControlFrame = errors.New("websocket: invalid control frame")
+)
+
+func newMaskKey() [4]byte {
+ n := rand.Uint32()
+ return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}
+}
+
+func hideTempErr(err error) error {
+ if e, ok := err.(net.Error); ok && e.Temporary() {
+ err = &netError{msg: e.Error(), timeout: e.Timeout()}
+ }
+ return err
+}
+
+func isControl(frameType int) bool {
+ return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage
+}
+
+func isData(frameType int) bool {
+ return frameType == TextMessage || frameType == BinaryMessage
+}
+
+var validReceivedCloseCodes = map[int]bool{
+ // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
+
+ CloseNormalClosure: true,
+ CloseGoingAway: true,
+ CloseProtocolError: true,
+ CloseUnsupportedData: true,
+ CloseNoStatusReceived: false,
+ CloseAbnormalClosure: false,
+ CloseInvalidFramePayloadData: true,
+ ClosePolicyViolation: true,
+ CloseMessageTooBig: true,
+ CloseMandatoryExtension: true,
+ CloseInternalServerErr: true,
+ CloseServiceRestart: true,
+ CloseTryAgainLater: true,
+ CloseTLSHandshake: false,
+}
+
+func isValidReceivedCloseCode(code int) bool {
+ return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999)
+}
+
+// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this
+// interface. The type of the value stored in a pool is not specified.
+type BufferPool interface {
+ // Get gets a value from the pool or returns nil if the pool is empty.
+ Get() interface{}
+ // Put adds a value to the pool.
+ Put(interface{})
+}
+
+// writePoolData is the type added to the write buffer pool. This wrapper is
+// used to prevent applications from peeking at and depending on the values
+// added to the pool.
+type writePoolData struct{ buf []byte }
+
+// The Conn type represents a WebSocket connection.
+type Conn struct {
+ conn net.Conn
+ isServer bool
+ subprotocol string
+
+ // Write fields
+ mu chan bool // used as mutex to protect write to conn
+ writeBuf []byte // frame is constructed in this buffer.
+ writePool BufferPool
+ writeBufSize int
+ writeDeadline time.Time
+ writer io.WriteCloser // the current writer returned to the application
+ isWriting bool // for best-effort concurrent write detection
+
+ writeErrMu sync.Mutex
+ writeErr error
+
+ enableWriteCompression bool
+ compressionLevel int
+ newCompressionWriter func(io.WriteCloser, int) io.WriteCloser
+
+ // Read fields
+ reader io.ReadCloser // the current reader returned to the application
+ readErr error
+ br *bufio.Reader
+ readRemaining int64 // bytes remaining in current frame.
+ readFinal bool // true the current message has more frames.
+ readLength int64 // Message size.
+ readLimit int64 // Maximum message size.
+ readMaskPos int
+ readMaskKey [4]byte
+ handlePong func(string) error
+ handlePing func(string) error
+ handleClose func(int, string) error
+ readErrCount int
+ messageReader *messageReader // the current low-level reader
+
+ readDecompress bool // whether last read frame had RSV1 set
+ newDecompressionReader func(io.Reader) io.ReadCloser
+}
+
+func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn {
+
+ if br == nil {
+ if readBufferSize == 0 {
+ readBufferSize = defaultReadBufferSize
+ } else if readBufferSize < maxControlFramePayloadSize {
+ // must be large enough for control frame
+ readBufferSize = maxControlFramePayloadSize
+ }
+ br = bufio.NewReaderSize(conn, readBufferSize)
+ }
+
+ if writeBufferSize <= 0 {
+ writeBufferSize = defaultWriteBufferSize
+ }
+ writeBufferSize += maxFrameHeaderSize
+
+ if writeBuf == nil && writeBufferPool == nil {
+ writeBuf = make([]byte, writeBufferSize)
+ }
+
+ mu := make(chan bool, 1)
+ mu <- true
+ c := &Conn{
+ isServer: isServer,
+ br: br,
+ conn: conn,
+ mu: mu,
+ readFinal: true,
+ writeBuf: writeBuf,
+ writePool: writeBufferPool,
+ writeBufSize: writeBufferSize,
+ enableWriteCompression: true,
+ compressionLevel: defaultCompressionLevel,
+ }
+ c.SetCloseHandler(nil)
+ c.SetPingHandler(nil)
+ c.SetPongHandler(nil)
+ return c
+}
+
+// Subprotocol returns the negotiated protocol for the connection.
+func (c *Conn) Subprotocol() string {
+ return c.subprotocol
+}
+
+// Close closes the underlying network connection without sending or waiting
+// for a close message.
+func (c *Conn) Close() error {
+ return c.conn.Close()
+}
+
+// LocalAddr returns the local network address.
+func (c *Conn) LocalAddr() net.Addr {
+ return c.conn.LocalAddr()
+}
+
+// RemoteAddr returns the remote network address.
+func (c *Conn) RemoteAddr() net.Addr {
+ return c.conn.RemoteAddr()
+}
+
+// Write methods
+
+func (c *Conn) writeFatal(err error) error {
+ err = hideTempErr(err)
+ c.writeErrMu.Lock()
+ if c.writeErr == nil {
+ c.writeErr = err
+ }
+ c.writeErrMu.Unlock()
+ return err
+}
+
+func (c *Conn) read(n int) ([]byte, error) {
+ p, err := c.br.Peek(n)
+ if err == io.EOF {
+ err = errUnexpectedEOF
+ }
+ c.br.Discard(len(p))
+ return p, err
+}
+
// write writes buf0 and the optional buf1 to the network connection as one
// frame's bytes, serialized by the c.mu channel semaphore. A previously
// recorded fatal write error is returned without touching the connection;
// a new network error is made sticky via writeFatal. After a close frame
// is sent, ErrCloseSent is recorded so all subsequent writes fail.
func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error {
	<-c.mu
	defer func() { c.mu <- true }()

	c.writeErrMu.Lock()
	err := c.writeErr
	c.writeErrMu.Unlock()
	if err != nil {
		return err
	}

	c.conn.SetWriteDeadline(deadline)
	if len(buf1) == 0 {
		_, err = c.conn.Write(buf0)
	} else {
		// Two buffers: writeBufs uses net.Buffers on go1.8+ to avoid a copy.
		err = c.writeBufs(buf0, buf1)
	}
	if err != nil {
		return c.writeFatal(err)
	}
	if frameType == CloseMessage {
		c.writeFatal(ErrCloseSent)
	}
	return nil
}
+
// WriteControl writes a control message with the given deadline. The allowed
// message types are CloseMessage, PingMessage and PongMessage. Unlike the
// data-message path it builds the frame in a local buffer, so it may be
// called concurrently with other write methods.
func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error {
	if !isControl(messageType) {
		return errBadWriteOpCode
	}
	if len(data) > maxControlFramePayloadSize {
		return errInvalidControlFrame
	}

	// Control frames are always final (FIN set) and at most 125 bytes, so
	// the one-byte length encoding is always sufficient.
	b0 := byte(messageType) | finalBit
	b1 := byte(len(data))
	if !c.isServer {
		b1 |= maskBit
	}

	buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize)
	buf = append(buf, b0, b1)

	if c.isServer {
		buf = append(buf, data...)
	} else {
		// Clients must mask. The 4-byte key follows the 2-byte header, so
		// the payload to mask starts at offset 6.
		key := newMaskKey()
		buf = append(buf, key[:]...)
		buf = append(buf, data...)
		maskBytes(key, 0, buf[6:])
	}

	d := time.Hour * 1000 // effectively "no timeout" when deadline is zero
	if !deadline.IsZero() {
		d = deadline.Sub(time.Now())
		if d < 0 {
			return errWriteTimeout
		}
	}

	// Acquire the write lock, but give up when the deadline expires so a
	// blocked data writer cannot stall a control message indefinitely.
	timer := time.NewTimer(d)
	select {
	case <-c.mu:
		timer.Stop()
	case <-timer.C:
		return errWriteTimeout
	}
	defer func() { c.mu <- true }()

	c.writeErrMu.Lock()
	err := c.writeErr
	c.writeErrMu.Unlock()
	if err != nil {
		return err
	}

	c.conn.SetWriteDeadline(deadline)
	_, err = c.conn.Write(buf)
	if err != nil {
		return c.writeFatal(err)
	}
	if messageType == CloseMessage {
		c.writeFatal(ErrCloseSent)
	}
	return err
}
+
// prepWrite prepares the connection for writing a new message: it closes any
// writer still open from a previous message, validates messageType, checks
// for a sticky write error, and ensures c.writeBuf is populated. writeBuf is
// nil only when a write buffer pool is in use (see newConn), in which case a
// buffer is taken from the pool or freshly allocated.
func (c *Conn) prepWrite(messageType int) error {
	// Close previous writer if not already closed by the application. It's
	// probably better to return an error in this situation, but we cannot
	// change this without breaking existing applications.
	if c.writer != nil {
		c.writer.Close()
		c.writer = nil
	}

	if !isControl(messageType) && !isData(messageType) {
		return errBadWriteOpCode
	}

	c.writeErrMu.Lock()
	err := c.writeErr
	c.writeErrMu.Unlock()
	if err != nil {
		return err
	}

	if c.writeBuf == nil {
		wpd, ok := c.writePool.Get().(writePoolData)
		if ok {
			c.writeBuf = wpd.buf
		} else {
			c.writeBuf = make([]byte, c.writeBufSize)
		}
	}
	return nil
}
+
+// NextWriter returns a writer for the next message to send. The writer's Close
+// method flushes the complete message to the network.
+//
+// There can be at most one open writer on a connection. NextWriter closes the
+// previous writer if the application has not already done so.
+//
+// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and
+// PongMessage) are supported.
+func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) {
+ if err := c.prepWrite(messageType); err != nil {
+ return nil, err
+ }
+
+ mw := &messageWriter{
+ c: c,
+ frameType: messageType,
+ pos: maxFrameHeaderSize,
+ }
+ c.writer = mw
+ if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) {
+ w := c.newCompressionWriter(c.writer, c.compressionLevel)
+ mw.compress = true
+ c.writer = w
+ }
+ return c.writer, nil
+}
+
+type messageWriter struct {
+ c *Conn
+ compress bool // whether next call to flushFrame should set RSV1
+ pos int // end of data in writeBuf.
+ frameType int // type of the current frame.
+ err error
+}
+
+func (w *messageWriter) fatal(err error) error {
+ if w.err != nil {
+ w.err = err
+ w.c.writer = nil
+ }
+ return err
+}
+
// flushFrame writes buffered data and extra as a frame to the network. The
// final argument indicates that this is the last frame in the message.
// The payload always starts at writeBuf[maxFrameHeaderSize]; the header is
// laid out immediately before it, so framePos advances when the shorter
// extended-length encodings (and the absent server-side mask) are used.
func (w *messageWriter) flushFrame(final bool, extra []byte) error {
	c := w.c
	length := w.pos - maxFrameHeaderSize + len(extra)

	// Check for invalid control frames.
	if isControl(w.frameType) &&
		(!final || length > maxControlFramePayloadSize) {
		return w.fatal(errInvalidControlFrame)
	}

	b0 := byte(w.frameType)
	if final {
		b0 |= finalBit
	}
	if w.compress {
		b0 |= rsv1Bit
	}
	// RSV1 is only set on the first frame of a compressed message.
	w.compress = false

	b1 := byte(0)
	if !c.isServer {
		b1 |= maskBit
	}

	// Assume that the frame starts at beginning of c.writeBuf.
	framePos := 0
	if c.isServer {
		// Adjust up if mask not included in the header.
		framePos = 4
	}

	switch {
	case length >= 65536:
		c.writeBuf[framePos] = b0
		c.writeBuf[framePos+1] = b1 | 127
		binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length))
	case length > 125:
		framePos += 6
		c.writeBuf[framePos] = b0
		c.writeBuf[framePos+1] = b1 | 126
		binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length))
	default:
		framePos += 8
		c.writeBuf[framePos] = b0
		c.writeBuf[framePos+1] = b1 | byte(length)
	}

	if !c.isServer {
		// Clients mask the buffered payload in place; the zero-copy
		// "extra" path is a server-only optimization, so its use here is
		// an internal error.
		key := newMaskKey()
		copy(c.writeBuf[maxFrameHeaderSize-4:], key[:])
		maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos])
		if len(extra) > 0 {
			return c.writeFatal(errors.New("websocket: internal error, extra used in client mode"))
		}
	}

	// Write the buffers to the connection with best-effort detection of
	// concurrent writes. See the concurrency section in the package
	// documentation for more info.

	if c.isWriting {
		panic("concurrent write to websocket connection")
	}
	c.isWriting = true

	err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra)

	if !c.isWriting {
		panic("concurrent write to websocket connection")
	}
	c.isWriting = false

	if err != nil {
		return w.fatal(err)
	}

	if final {
		c.writer = nil
		// Message complete: return the write buffer to the pool, if any.
		if c.writePool != nil {
			c.writePool.Put(writePoolData{buf: c.writeBuf})
			c.writeBuf = nil
		}
		return nil
	}

	// Setup for next frame.
	w.pos = maxFrameHeaderSize
	w.frameType = continuationFrame
	return nil
}
+
// ncopy returns how many bytes (at most max) can currently be copied into
// the write buffer, flushing a non-final frame first if the buffer is full.
func (w *messageWriter) ncopy(max int) (int, error) {
	n := len(w.c.writeBuf) - w.pos
	if n <= 0 {
		if err := w.flushFrame(false, nil); err != nil {
			return 0, err
		}
		n = len(w.c.writeBuf) - w.pos
	}
	if n > max {
		n = max
	}
	return n, nil
}
+
// Write implements io.Writer: p is appended to the current message, spilling
// into additional non-final frames whenever the write buffer fills.
func (w *messageWriter) Write(p []byte) (int, error) {
	if w.err != nil {
		return 0, w.err
	}

	if len(p) > 2*len(w.c.writeBuf) && w.c.isServer {
		// Don't buffer large messages.
		// Servers don't mask, so p can be sent as the frame payload
		// directly (as flushFrame's "extra") without copying it.
		err := w.flushFrame(false, p)
		if err != nil {
			return 0, err
		}
		return len(p), nil
	}

	nn := len(p)
	for len(p) > 0 {
		n, err := w.ncopy(len(p))
		if err != nil {
			return 0, err
		}
		copy(w.c.writeBuf[w.pos:], p[:n])
		w.pos += n
		p = p[n:]
	}
	return nn, nil
}
+
// WriteString appends the string to the current message. It mirrors Write's
// buffered path but copies from the string directly, avoiding a []byte
// conversion/allocation.
func (w *messageWriter) WriteString(p string) (int, error) {
	if w.err != nil {
		return 0, w.err
	}

	nn := len(p)
	for len(p) > 0 {
		n, err := w.ncopy(len(p))
		if err != nil {
			return 0, err
		}
		copy(w.c.writeBuf[w.pos:], p[:n])
		w.pos += n
		p = p[n:]
	}
	return nn, nil
}
+
// ReadFrom implements io.ReaderFrom: it streams r into the message, flushing
// a continuation frame whenever the write buffer fills. io.EOF from r is the
// normal end of input and is not reported as an error.
func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) {
	if w.err != nil {
		return 0, w.err
	}
	for {
		if w.pos == len(w.c.writeBuf) {
			err = w.flushFrame(false, nil)
			if err != nil {
				break
			}
		}
		var n int
		n, err = r.Read(w.c.writeBuf[w.pos:])
		w.pos += n
		nn += int64(n)
		if err != nil {
			if err == io.EOF {
				err = nil
			}
			break
		}
	}
	return nn, err
}
+
// Close flushes the final frame of the message to the network. After a
// successful Close the writer is unusable: w.err is set to errWriteClosed so
// all further method calls fail.
func (w *messageWriter) Close() error {
	if w.err != nil {
		return w.err
	}
	if err := w.flushFrame(true, nil); err != nil {
		return err
	}
	w.err = errWriteClosed
	return nil
}
+
+// WritePreparedMessage writes prepared message into connection.
+func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error {
+ frameType, frameData, err := pm.frame(prepareKey{
+ isServer: c.isServer,
+ compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType),
+ compressionLevel: c.compressionLevel,
+ })
+ if err != nil {
+ return err
+ }
+ if c.isWriting {
+ panic("concurrent write to websocket connection")
+ }
+ c.isWriting = true
+ err = c.write(frameType, c.writeDeadline, frameData, nil)
+ if !c.isWriting {
+ panic("concurrent write to websocket connection")
+ }
+ c.isWriting = false
+ return err
+}
+
// WriteMessage is a helper method for getting a writer using NextWriter,
// writing the message and closing the writer.
func (c *Conn) WriteMessage(messageType int, data []byte) error {

	if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) {
		// Fast path with no allocations and single frame.

		if err := c.prepWrite(messageType); err != nil {
			return err
		}
		mw := messageWriter{c: c, frameType: messageType, pos: maxFrameHeaderSize}
		n := copy(c.writeBuf[mw.pos:], data)
		mw.pos += n
		data = data[n:]
		// Whatever did not fit in writeBuf is passed as "extra" so that
		// flushFrame can send it without another copy (servers don't mask).
		return mw.flushFrame(true, data)
	}

	w, err := c.NextWriter(messageType)
	if err != nil {
		return err
	}
	if _, err = w.Write(data); err != nil {
		return err
	}
	return w.Close()
}
+
+// SetWriteDeadline sets the write deadline on the underlying network
+// connection. After a write has timed out, the websocket state is corrupt and
+// all future writes will return an error. A zero value for t means writes will
+// not time out.
+func (c *Conn) SetWriteDeadline(t time.Time) error {
+ c.writeDeadline = t
+ return nil
+}
+
+// Read methods
+
// advanceFrame discards any unread remainder of the previous frame, reads
// and validates the next frame header, and handles control frames
// (ping/pong/close) inline by invoking the registered handlers. It returns
// the frame type of a data frame whose payload is now readable from c.br,
// or noFrame with an error (a *CloseError after a close frame).
func (c *Conn) advanceFrame() (int, error) {
	// 1. Skip remainder of previous frame.

	if c.readRemaining > 0 {
		if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil {
			return noFrame, err
		}
	}

	// 2. Read and parse first two bytes of frame header.

	p, err := c.read(2)
	if err != nil {
		return noFrame, err
	}

	final := p[0]&finalBit != 0
	frameType := int(p[0] & 0xf)
	mask := p[1]&maskBit != 0
	// 7-bit length code; values 126/127 select the extended lengths read
	// in step 3 below.
	c.readRemaining = int64(p[1] & 0x7f)

	c.readDecompress = false
	if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 {
		// RSV1 marks a compressed message when compression was negotiated;
		// clear it so the reserved-bit check below doesn't reject it.
		c.readDecompress = true
		p[0] &^= rsv1Bit
	}

	if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 {
		return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16))
	}

	switch frameType {
	case CloseMessage, PingMessage, PongMessage:
		if c.readRemaining > maxControlFramePayloadSize {
			return noFrame, c.handleProtocolError("control frame length > 125")
		}
		if !final {
			return noFrame, c.handleProtocolError("control frame not final")
		}
	case TextMessage, BinaryMessage:
		if !c.readFinal {
			return noFrame, c.handleProtocolError("message start before final message frame")
		}
		c.readFinal = final
	case continuationFrame:
		if c.readFinal {
			return noFrame, c.handleProtocolError("continuation after final message frame")
		}
		c.readFinal = final
	default:
		return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType))
	}

	// 3. Read and parse frame length.

	switch c.readRemaining {
	case 126:
		p, err := c.read(2)
		if err != nil {
			return noFrame, err
		}
		c.readRemaining = int64(binary.BigEndian.Uint16(p))
	case 127:
		p, err := c.read(8)
		if err != nil {
			return noFrame, err
		}
		c.readRemaining = int64(binary.BigEndian.Uint64(p))
	}

	// 4. Handle frame masking.

	// RFC 6455: clients must mask frames, servers must not, so the mask
	// flag must be the opposite of our own role.
	if mask != c.isServer {
		return noFrame, c.handleProtocolError("incorrect mask flag")
	}

	if mask {
		c.readMaskPos = 0
		p, err := c.read(len(c.readMaskKey))
		if err != nil {
			return noFrame, err
		}
		copy(c.readMaskKey[:], p)
	}

	// 5. For text and binary messages, enforce read limit and return.

	if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage {

		c.readLength += c.readRemaining
		if c.readLimit > 0 && c.readLength > c.readLimit {
			c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait))
			return noFrame, ErrReadLimit
		}

		return frameType, nil
	}

	// 6. Read control frame payload.

	var payload []byte
	if c.readRemaining > 0 {
		payload, err = c.read(int(c.readRemaining))
		c.readRemaining = 0
		if err != nil {
			return noFrame, err
		}
		if c.isServer {
			maskBytes(c.readMaskKey, 0, payload)
		}
	}

	// 7. Process control frame payload.

	switch frameType {
	case PongMessage:
		if err := c.handlePong(string(payload)); err != nil {
			return noFrame, err
		}
	case PingMessage:
		if err := c.handlePing(string(payload)); err != nil {
			return noFrame, err
		}
	case CloseMessage:
		closeCode := CloseNoStatusReceived
		closeText := ""
		if len(payload) >= 2 {
			closeCode = int(binary.BigEndian.Uint16(payload))
			if !isValidReceivedCloseCode(closeCode) {
				return noFrame, c.handleProtocolError("invalid close code")
			}
			closeText = string(payload[2:])
			if !utf8.ValidString(closeText) {
				return noFrame, c.handleProtocolError("invalid utf8 payload in close frame")
			}
		}
		if err := c.handleClose(closeCode, closeText); err != nil {
			return noFrame, err
		}
		return noFrame, &CloseError{Code: closeCode, Text: closeText}
	}

	return frameType, nil
}
+
// handleProtocolError sends a close frame with CloseProtocolError and the
// given message to the peer, then returns a matching error for the caller
// to propagate.
func (c *Conn) handleProtocolError(message string) error {
	c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait))
	return errors.New("websocket: " + message)
}
+
+// NextReader returns the next data message received from the peer. The
+// returned messageType is either TextMessage or BinaryMessage.
+//
+// There can be at most one open reader on a connection. NextReader discards
+// the previous message if the application has not already consumed it.
+//
+// Applications must break out of the application's read loop when this method
+// returns a non-nil error value. Errors returned from this method are
+// permanent. Once this method returns a non-nil error, all subsequent calls to
+// this method return the same error.
+func (c *Conn) NextReader() (messageType int, r io.Reader, err error) {
+ // Close previous reader, only relevant for decompression.
+ if c.reader != nil {
+ c.reader.Close()
+ c.reader = nil
+ }
+
+ c.messageReader = nil
+ c.readLength = 0
+
+ for c.readErr == nil {
+ frameType, err := c.advanceFrame()
+ if err != nil {
+ c.readErr = hideTempErr(err)
+ break
+ }
+ if frameType == TextMessage || frameType == BinaryMessage {
+ c.messageReader = &messageReader{c}
+ c.reader = c.messageReader
+ if c.readDecompress {
+ c.reader = c.newDecompressionReader(c.reader)
+ }
+ return frameType, c.reader, nil
+ }
+ }
+
+ // Applications that do handle the error returned from this method spin in
+ // tight loop on connection failure. To help application developers detect
+ // this error, panic on repeated reads to the failed connection.
+ c.readErrCount++
+ if c.readErrCount >= 1000 {
+ panic("repeated read on failed websocket connection")
+ }
+
+ return noFrame, nil, c.readErr
+}
+
+type messageReader struct{ c *Conn }
+
// Read implements io.Reader for the current data message. It returns io.EOF
// once the final frame of the message is consumed, or immediately when this
// reader has been superseded by a later NextReader call.
func (r *messageReader) Read(b []byte) (int, error) {
	c := r.c
	if c.messageReader != r {
		return 0, io.EOF
	}

	for c.readErr == nil {

		if c.readRemaining > 0 {
			// Never read past the end of the current frame.
			if int64(len(b)) > c.readRemaining {
				b = b[:c.readRemaining]
			}
			n, err := c.br.Read(b)
			c.readErr = hideTempErr(err)
			if c.isServer {
				// Server-received frames are masked; unmask in place,
				// carrying the key position across Read calls.
				c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n])
			}
			c.readRemaining -= int64(n)
			if c.readRemaining > 0 && c.readErr == io.EOF {
				c.readErr = errUnexpectedEOF
			}
			return n, c.readErr
		}

		if c.readFinal {
			c.messageReader = nil
			return 0, io.EOF
		}

		// Advance to the next continuation frame of this message.
		frameType, err := c.advanceFrame()
		switch {
		case err != nil:
			c.readErr = hideTempErr(err)
		case frameType == TextMessage || frameType == BinaryMessage:
			c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader")
		}
	}

	err := c.readErr
	if err == io.EOF && c.messageReader == r {
		err = errUnexpectedEOF
	}
	return 0, err
}
+
+func (r *messageReader) Close() error {
+ return nil
+}
+
+// ReadMessage is a helper method for getting a reader using NextReader and
+// reading from that reader to a buffer.
+func (c *Conn) ReadMessage() (messageType int, p []byte, err error) {
+ var r io.Reader
+ messageType, r, err = c.NextReader()
+ if err != nil {
+ return messageType, nil, err
+ }
+ p, err = ioutil.ReadAll(r)
+ return messageType, p, err
+}
+
+// SetReadDeadline sets the read deadline on the underlying network connection.
+// After a read has timed out, the websocket connection state is corrupt and
+// all future reads will return an error. A zero value for t means reads will
+// not time out.
+func (c *Conn) SetReadDeadline(t time.Time) error {
+ return c.conn.SetReadDeadline(t)
+}
+
+// SetReadLimit sets the maximum size for a message read from the peer. If a
+// message exceeds the limit, the connection sends a close message to the peer
+// and returns ErrReadLimit to the application.
+func (c *Conn) SetReadLimit(limit int64) {
+ c.readLimit = limit
+}
+
+// CloseHandler returns the current close handler
+func (c *Conn) CloseHandler() func(code int, text string) error {
+ return c.handleClose
+}
+
+// SetCloseHandler sets the handler for close messages received from the peer.
+// The code argument to h is the received close code or CloseNoStatusReceived
+// if the close message is empty. The default close handler sends a close
+// message back to the peer.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// close messages as described in the section on Control Messages above.
+//
+// The connection read methods return a CloseError when a close message is
+// received. Most applications should handle close messages as part of their
+// normal error handling. Applications should only set a close handler when the
+// application must perform some action before sending a close message back to
+// the peer.
+func (c *Conn) SetCloseHandler(h func(code int, text string) error) {
+ if h == nil {
+ h = func(code int, text string) error {
+ message := FormatCloseMessage(code, "")
+ c.WriteControl(CloseMessage, message, time.Now().Add(writeWait))
+ return nil
+ }
+ }
+ c.handleClose = h
+}
+
+// PingHandler returns the current ping handler
+func (c *Conn) PingHandler() func(appData string) error {
+ return c.handlePing
+}
+
+// SetPingHandler sets the handler for ping messages received from the peer.
+// The appData argument to h is the PING message application data. The default
+// ping handler sends a pong to the peer.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// ping messages as described in the section on Control Messages above.
+func (c *Conn) SetPingHandler(h func(appData string) error) {
+ if h == nil {
+ h = func(message string) error {
+ err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait))
+ if err == ErrCloseSent {
+ return nil
+ } else if e, ok := err.(net.Error); ok && e.Temporary() {
+ return nil
+ }
+ return err
+ }
+ }
+ c.handlePing = h
+}
+
+// PongHandler returns the current pong handler
+func (c *Conn) PongHandler() func(appData string) error {
+ return c.handlePong
+}
+
+// SetPongHandler sets the handler for pong messages received from the peer.
+// The appData argument to h is the PONG message application data. The default
+// pong handler does nothing.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// pong messages as described in the section on Control Messages above.
+func (c *Conn) SetPongHandler(h func(appData string) error) {
+ if h == nil {
+ h = func(string) error { return nil }
+ }
+ c.handlePong = h
+}
+
+// UnderlyingConn returns the internal net.Conn. This can be used to further
+// modifications to connection specific flags.
+func (c *Conn) UnderlyingConn() net.Conn {
+ return c.conn
+}
+
+// EnableWriteCompression enables and disables write compression of
+// subsequent text and binary messages. This function is a noop if
+// compression was not negotiated with the peer.
+func (c *Conn) EnableWriteCompression(enable bool) {
+ c.enableWriteCompression = enable
+}
+
+// SetCompressionLevel sets the flate compression level for subsequent text and
+// binary messages. This function is a noop if compression was not negotiated
+// with the peer. See the compress/flate package for a description of
+// compression levels.
+func (c *Conn) SetCompressionLevel(level int) error {
+ if !isValidCompressionLevel(level) {
+ return errors.New("websocket: invalid compression level")
+ }
+ c.compressionLevel = level
+ return nil
+}
+
+// FormatCloseMessage formats closeCode and text as a WebSocket close message.
+// An empty message is returned for code CloseNoStatusReceived.
+func FormatCloseMessage(closeCode int, text string) []byte {
+ if closeCode == CloseNoStatusReceived {
+ // Return empty message because it's illegal to send
+ // CloseNoStatusReceived. Return non-nil value in case application
+ // checks for nil.
+ return []byte{}
+ }
+ buf := make([]byte, 2+len(text))
+ binary.BigEndian.PutUint16(buf, uint16(closeCode))
+ copy(buf[2:], text)
+ return buf
+}
diff --git a/vendor/github.com/gorilla/websocket/conn_write.go b/vendor/github.com/gorilla/websocket/conn_write.go
new file mode 100644
index 0000000..a509a21
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/conn_write.go
@@ -0,0 +1,15 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package websocket
+
+import "net"
+
+func (c *Conn) writeBufs(bufs ...[]byte) error {
+ b := net.Buffers(bufs)
+ _, err := b.WriteTo(c.conn)
+ return err
+}
diff --git a/vendor/github.com/gorilla/websocket/conn_write_legacy.go b/vendor/github.com/gorilla/websocket/conn_write_legacy.go
new file mode 100644
index 0000000..37edaff
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/conn_write_legacy.go
@@ -0,0 +1,18 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.8
+
+package websocket
+
// writeBufs writes each non-empty buffer to the connection in order. This is
// the pre-go1.8 fallback (see the build tag above); go1.8+ uses net.Buffers
// in conn_write.go instead.
func (c *Conn) writeBufs(bufs ...[]byte) error {
	for _, buf := range bufs {
		if len(buf) > 0 {
			if _, err := c.conn.Write(buf); err != nil {
				return err
			}
		}
	}
	return nil
}
diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go
new file mode 100644
index 0000000..dcce1a6
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/doc.go
@@ -0,0 +1,180 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package websocket implements the WebSocket protocol defined in RFC 6455.
+//
+// Overview
+//
+// The Conn type represents a WebSocket connection. A server application calls
+// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
+//
+// var upgrader = websocket.Upgrader{
+// ReadBufferSize: 1024,
+// WriteBufferSize: 1024,
+// }
+//
+// func handler(w http.ResponseWriter, r *http.Request) {
+// conn, err := upgrader.Upgrade(w, r, nil)
+// if err != nil {
+// log.Println(err)
+// return
+// }
+// ... Use conn to send and receive messages.
+// }
+//
+// Call the connection's WriteMessage and ReadMessage methods to send and
+// receive messages as a slice of bytes. This snippet of code shows how to echo
+// messages using these methods:
+//
+// for {
+// messageType, p, err := conn.ReadMessage()
+// if err != nil {
+// log.Println(err)
+// return
+// }
+// if err := conn.WriteMessage(messageType, p); err != nil {
+// log.Println(err)
+// return
+// }
+// }
+//
+// In above snippet of code, p is a []byte and messageType is an int with value
+// websocket.BinaryMessage or websocket.TextMessage.
+//
+// An application can also send and receive messages using the io.WriteCloser
+// and io.Reader interfaces. To send a message, call the connection NextWriter
+// method to get an io.WriteCloser, write the message to the writer and close
+// the writer when done. To receive a message, call the connection NextReader
+// method to get an io.Reader and read until io.EOF is returned. This snippet
+// shows how to echo messages using the NextWriter and NextReader methods:
+//
+// for {
+// messageType, r, err := conn.NextReader()
+// if err != nil {
+// return
+// }
+// w, err := conn.NextWriter(messageType)
+// if err != nil {
+// return err
+// }
+// if _, err := io.Copy(w, r); err != nil {
+// return err
+// }
+// if err := w.Close(); err != nil {
+// return err
+// }
+// }
+//
+// Data Messages
+//
+// The WebSocket protocol distinguishes between text and binary data messages.
+// Text messages are interpreted as UTF-8 encoded text. The interpretation of
+// binary messages is left to the application.
+//
+// This package uses the TextMessage and BinaryMessage integer constants to
+// identify the two data message types. The ReadMessage and NextReader methods
+// return the type of the received message. The messageType argument to the
+// WriteMessage and NextWriter methods specifies the type of a sent message.
+//
+// It is the application's responsibility to ensure that text messages are
+// valid UTF-8 encoded text.
+//
+// Control Messages
+//
+// The WebSocket protocol defines three types of control messages: close, ping
+// and pong. Call the connection WriteControl, WriteMessage or NextWriter
+// methods to send a control message to the peer.
+//
+// Connections handle received close messages by calling the handler function
+// set with the SetCloseHandler method and by returning a *CloseError from the
+// NextReader, ReadMessage or the message Read method. The default close
+// handler sends a close message to the peer.
+//
+// Connections handle received ping messages by calling the handler function
+// set with the SetPingHandler method. The default ping handler sends a pong
+// message to the peer.
+//
+// Connections handle received pong messages by calling the handler function
+// set with the SetPongHandler method. The default pong handler does nothing.
+// If an application sends ping messages, then the application should set a
+// pong handler to receive the corresponding pong.
+//
+// The control message handler functions are called from the NextReader,
+// ReadMessage and message reader Read methods. The default close and ping
+// handlers can block these methods for a short time when the handler writes to
+// the connection.
+//
+// The application must read the connection to process close, ping and pong
+// messages sent from the peer. If the application is not otherwise interested
+// in messages from the peer, then the application should start a goroutine to
+// read and discard messages from the peer. A simple example is:
+//
+// func readLoop(c *websocket.Conn) {
+// for {
+// if _, _, err := c.NextReader(); err != nil {
+// c.Close()
+// break
+// }
+// }
+// }
+//
+// Concurrency
+//
+// Connections support one concurrent reader and one concurrent writer.
+//
+// Applications are responsible for ensuring that no more than one goroutine
+// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
+// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
+// that no more than one goroutine calls the read methods (NextReader,
+// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
+// concurrently.
+//
+// The Close and WriteControl methods can be called concurrently with all other
+// methods.
+//
+// Origin Considerations
+//
+// Web browsers allow Javascript applications to open a WebSocket connection to
+// any host. It's up to the server to enforce an origin policy using the Origin
+// request header sent by the browser.
+//
+// The Upgrader calls the function specified in the CheckOrigin field to check
+// the origin. If the CheckOrigin function returns false, then the Upgrade
+// method fails the WebSocket handshake with HTTP status 403.
+//
+// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
+// the handshake if the Origin request header is present and the Origin host is
+// not equal to the Host request header.
+//
+// The deprecated package-level Upgrade function does not perform origin
+// checking. The application is responsible for checking the Origin header
+// before calling the Upgrade function.
+//
+// Compression EXPERIMENTAL
+//
+// Per message compression extensions (RFC 7692) are experimentally supported
+// by this package in a limited capacity. Setting the EnableCompression option
+// to true in Dialer or Upgrader will attempt to negotiate per message deflate
+// support.
+//
+// var upgrader = websocket.Upgrader{
+// EnableCompression: true,
+// }
+//
+// If compression was successfully negotiated with the connection's peer, any
+// message received in compressed form will be automatically decompressed.
+// All Read methods will return uncompressed bytes.
+//
+// Per message compression of messages written to a connection can be enabled
+// or disabled by calling the corresponding Conn method:
+//
+// conn.EnableWriteCompression(false)
+//
+// Currently this package does not support compression with "context takeover".
+// This means that messages must be compressed and decompressed in isolation,
+// without retaining sliding window or dictionary state across messages. For
+// more details refer to RFC 7692.
+//
+// Use of compression is experimental and may result in decreased performance.
+package websocket
diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go
new file mode 100644
index 0000000..dc2c1f6
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/json.go
@@ -0,0 +1,60 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "encoding/json"
+ "io"
+)
+
+// WriteJSON writes the JSON encoding of v as a message.
+//
+// Deprecated: Use c.WriteJSON instead.
+func WriteJSON(c *Conn, v interface{}) error {
+ return c.WriteJSON(v)
+}
+
+// WriteJSON writes the JSON encoding of v as a message.
+//
+// See the documentation for encoding/json Marshal for details about the
+// conversion of Go values to JSON.
+func (c *Conn) WriteJSON(v interface{}) error {
+ w, err := c.NextWriter(TextMessage)
+ if err != nil {
+ return err
+ }
+ err1 := json.NewEncoder(w).Encode(v)
+ err2 := w.Close()
+ if err1 != nil {
+ return err1
+ }
+ return err2
+}
+
+// ReadJSON reads the next JSON-encoded message from the connection and stores
+// it in the value pointed to by v.
+//
+// Deprecated: Use c.ReadJSON instead.
+func ReadJSON(c *Conn, v interface{}) error {
+ return c.ReadJSON(v)
+}
+
+// ReadJSON reads the next JSON-encoded message from the connection and stores
+// it in the value pointed to by v.
+//
+// See the documentation for the encoding/json Unmarshal function for details
+// about the conversion of JSON to a Go value.
+func (c *Conn) ReadJSON(v interface{}) error {
+ _, r, err := c.NextReader()
+ if err != nil {
+ return err
+ }
+ err = json.NewDecoder(r).Decode(v)
+ if err == io.EOF {
+ // One value is expected in the message.
+ err = io.ErrUnexpectedEOF
+ }
+ return err
+}
diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go
new file mode 100644
index 0000000..577fce9
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/mask.go
@@ -0,0 +1,54 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
+// this source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+// +build !appengine
+
+package websocket
+
+import "unsafe"
+
+const wordSize = int(unsafe.Sizeof(uintptr(0)))
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+ // Mask one byte at a time for small buffers.
+ if len(b) < 2*wordSize {
+ for i := range b {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+ return pos & 3
+ }
+
+ // Mask one byte at a time to word boundary.
+ if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
+ n = wordSize - n
+ for i := range b[:n] {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+ b = b[n:]
+ }
+
+ // Create aligned word size key.
+ var k [wordSize]byte
+ for i := range k {
+ k[i] = key[(pos+i)&3]
+ }
+ kw := *(*uintptr)(unsafe.Pointer(&k))
+
+ // Mask one word at a time.
+ n := (len(b) / wordSize) * wordSize
+ for i := 0; i < n; i += wordSize {
+ *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
+ }
+
+ // Mask one byte at a time for remaining bytes.
+ b = b[n:]
+ for i := range b {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+
+ return pos & 3
+}
diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go
new file mode 100644
index 0000000..2aac060
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/mask_safe.go
@@ -0,0 +1,15 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
+// this source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+// +build appengine
+
+package websocket
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+ for i := range b {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+ return pos & 3
+}
diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go
new file mode 100644
index 0000000..74ec565
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/prepared.go
@@ -0,0 +1,102 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bytes"
+ "net"
+ "sync"
+ "time"
+)
+
+// PreparedMessage caches on the wire representations of a message payload.
+// Use PreparedMessage to efficiently send a message payload to multiple
+// connections. PreparedMessage is especially useful when compression is used
+// because the CPU and memory expensive compression operation can be executed
+// once for a given set of compression options.
+type PreparedMessage struct {
+ messageType int
+ data []byte
+ mu sync.Mutex
+ frames map[prepareKey]*preparedFrame
+}
+
+// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
+type prepareKey struct {
+ isServer bool
+ compress bool
+ compressionLevel int
+}
+
+// preparedFrame contains data in wire representation.
+type preparedFrame struct {
+ once sync.Once
+ data []byte
+}
+
+// NewPreparedMessage returns an initialized PreparedMessage. You can then send
+// it to connection using WritePreparedMessage method. Valid wire
+// representation will be calculated lazily only once for a set of current
+// connection options.
+func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
+ pm := &PreparedMessage{
+ messageType: messageType,
+ frames: make(map[prepareKey]*preparedFrame),
+ data: data,
+ }
+
+ // Prepare a plain server frame.
+ _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
+ if err != nil {
+ return nil, err
+ }
+
+ // To protect against caller modifying the data argument, remember the data
+ // copied to the plain server frame.
+ pm.data = frameData[len(frameData)-len(data):]
+ return pm, nil
+}
+
+func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
+ pm.mu.Lock()
+ frame, ok := pm.frames[key]
+ if !ok {
+ frame = &preparedFrame{}
+ pm.frames[key] = frame
+ }
+ pm.mu.Unlock()
+
+ var err error
+ frame.once.Do(func() {
+ // Prepare a frame using a 'fake' connection.
+ // TODO: Refactor code in conn.go to allow more direct construction of
+ // the frame.
+ mu := make(chan bool, 1)
+ mu <- true
+ var nc prepareConn
+ c := &Conn{
+ conn: &nc,
+ mu: mu,
+ isServer: key.isServer,
+ compressionLevel: key.compressionLevel,
+ enableWriteCompression: true,
+ writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
+ }
+ if key.compress {
+ c.newCompressionWriter = compressNoContextTakeover
+ }
+ err = c.WriteMessage(pm.messageType, pm.data)
+ frame.data = nc.buf.Bytes()
+ })
+ return pm.messageType, frame.data, err
+}
+
+type prepareConn struct {
+ buf bytes.Buffer
+ net.Conn
+}
+
+func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) }
+func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go
new file mode 100644
index 0000000..bf2478e
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/proxy.go
@@ -0,0 +1,77 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "encoding/base64"
+ "errors"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+type netDialerFunc func(network, addr string) (net.Conn, error)
+
+func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
+ return fn(network, addr)
+}
+
+func init() {
+ proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
+ return &httpProxyDialer{proxyURL: proxyURL, fowardDial: forwardDialer.Dial}, nil
+ })
+}
+
+type httpProxyDialer struct {
+ proxyURL *url.URL
+ fowardDial func(network, addr string) (net.Conn, error)
+}
+
+func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
+ hostPort, _ := hostPortNoPort(hpd.proxyURL)
+ conn, err := hpd.fowardDial(network, hostPort)
+ if err != nil {
+ return nil, err
+ }
+
+ connectHeader := make(http.Header)
+ if user := hpd.proxyURL.User; user != nil {
+ proxyUser := user.Username()
+ if proxyPassword, passwordSet := user.Password(); passwordSet {
+ credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
+ connectHeader.Set("Proxy-Authorization", "Basic "+credential)
+ }
+ }
+
+ connectReq := &http.Request{
+ Method: "CONNECT",
+ URL: &url.URL{Opaque: addr},
+ Host: addr,
+ Header: connectHeader,
+ }
+
+ if err := connectReq.Write(conn); err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ // Read response. It's OK to use and discard buffered reader here because
+ // the remote server does not speak until spoken to.
+ br := bufio.NewReader(conn)
+ resp, err := http.ReadResponse(br, connectReq)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ if resp.StatusCode != 200 {
+ conn.Close()
+ f := strings.SplitN(resp.Status, " ", 2)
+ return nil, errors.New(f[1])
+ }
+ return conn, nil
+}
diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go
new file mode 100644
index 0000000..a761824
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/server.go
@@ -0,0 +1,363 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "errors"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// HandshakeError describes an error with the handshake from the peer.
+type HandshakeError struct {
+ message string
+}
+
+func (e HandshakeError) Error() string { return e.message }
+
+// Upgrader specifies parameters for upgrading an HTTP connection to a
+// WebSocket connection.
+type Upgrader struct {
+ // HandshakeTimeout specifies the duration for the handshake to complete.
+ HandshakeTimeout time.Duration
+
+ // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer
+ // size is zero, then buffers allocated by the HTTP server are used. The
+ // I/O buffer sizes do not limit the size of the messages that can be sent
+ // or received.
+ ReadBufferSize, WriteBufferSize int
+
+ // WriteBufferPool is a pool of buffers for write operations. If the value
+ // is not set, then write buffers are allocated to the connection for the
+ // lifetime of the connection.
+ //
+ // A pool is most useful when the application has a modest volume of writes
+ // across a large number of connections.
+ //
+ // Applications should use a single pool for each unique value of
+ // WriteBufferSize.
+ WriteBufferPool BufferPool
+
+ // Subprotocols specifies the server's supported protocols in order of
+ // preference. If this field is not nil, then the Upgrade method negotiates a
+ // subprotocol by selecting the first match in this list with a protocol
+ // requested by the client. If there's no match, then no protocol is
+ // negotiated (the Sec-Websocket-Protocol header is not included in the
+ // handshake response).
+ Subprotocols []string
+
+ // Error specifies the function for generating HTTP error responses. If Error
+ // is nil, then http.Error is used to generate the HTTP response.
+ Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
+
+ // CheckOrigin returns true if the request Origin header is acceptable. If
+ // CheckOrigin is nil, then a safe default is used: return false if the
+ // Origin request header is present and the origin host is not equal to
+ // request Host header.
+ //
+ // A CheckOrigin function should carefully validate the request origin to
+ // prevent cross-site request forgery.
+ CheckOrigin func(r *http.Request) bool
+
+ // EnableCompression specify if the server should attempt to negotiate per
+ // message compression (RFC 7692). Setting this value to true does not
+ // guarantee that compression will be supported. Currently only "no context
+ // takeover" modes are supported.
+ EnableCompression bool
+}
+
+func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
+ err := HandshakeError{reason}
+ if u.Error != nil {
+ u.Error(w, r, status, err)
+ } else {
+ w.Header().Set("Sec-Websocket-Version", "13")
+ http.Error(w, http.StatusText(status), status)
+ }
+ return nil, err
+}
+
+// checkSameOrigin returns true if the origin is not set or is equal to the request host.
+func checkSameOrigin(r *http.Request) bool {
+ origin := r.Header["Origin"]
+ if len(origin) == 0 {
+ return true
+ }
+ u, err := url.Parse(origin[0])
+ if err != nil {
+ return false
+ }
+ return equalASCIIFold(u.Host, r.Host)
+}
+
+func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
+ if u.Subprotocols != nil {
+ clientProtocols := Subprotocols(r)
+ for _, serverProtocol := range u.Subprotocols {
+ for _, clientProtocol := range clientProtocols {
+ if clientProtocol == serverProtocol {
+ return clientProtocol
+ }
+ }
+ }
+ } else if responseHeader != nil {
+ return responseHeader.Get("Sec-Websocket-Protocol")
+ }
+ return ""
+}
+
+// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
+//
+// The responseHeader is included in the response to the client's upgrade
+// request. Use the responseHeader to specify cookies (Set-Cookie) and the
+// application negotiated subprotocol (Sec-WebSocket-Protocol).
+//
+// If the upgrade fails, then Upgrade replies to the client with an HTTP error
+// response.
+func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
+ const badHandshake = "websocket: the client is not using the websocket protocol: "
+
+ if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
+ return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
+ }
+
+ if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
+ return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
+ }
+
+ if r.Method != "GET" {
+ return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
+ }
+
+ if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
+ return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
+ }
+
+ if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
+ return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported")
+ }
+
+ checkOrigin := u.CheckOrigin
+ if checkOrigin == nil {
+ checkOrigin = checkSameOrigin
+ }
+ if !checkOrigin(r) {
+ return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
+ }
+
+ challengeKey := r.Header.Get("Sec-Websocket-Key")
+ if challengeKey == "" {
+ return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-WebSocket-Key' header is missing or blank")
+ }
+
+ subprotocol := u.selectSubprotocol(r, responseHeader)
+
+ // Negotiate PMCE
+ var compress bool
+ if u.EnableCompression {
+ for _, ext := range parseExtensions(r.Header) {
+ if ext[""] != "permessage-deflate" {
+ continue
+ }
+ compress = true
+ break
+ }
+ }
+
+ h, ok := w.(http.Hijacker)
+ if !ok {
+ return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
+ }
+ var brw *bufio.ReadWriter
+ netConn, brw, err := h.Hijack()
+ if err != nil {
+ return u.returnError(w, r, http.StatusInternalServerError, err.Error())
+ }
+
+ if brw.Reader.Buffered() > 0 {
+ netConn.Close()
+ return nil, errors.New("websocket: client sent data before handshake is complete")
+ }
+
+ var br *bufio.Reader
+ if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 {
+ // Reuse hijacked buffered reader as connection reader.
+ br = brw.Reader
+ }
+
+ buf := bufioWriterBuffer(netConn, brw.Writer)
+
+ var writeBuf []byte
+ if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 {
+ // Reuse hijacked write buffer as connection buffer.
+ writeBuf = buf
+ }
+
+ c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf)
+ c.subprotocol = subprotocol
+
+ if compress {
+ c.newCompressionWriter = compressNoContextTakeover
+ c.newDecompressionReader = decompressNoContextTakeover
+ }
+
+ // Use larger of hijacked buffer and connection write buffer for header.
+ p := buf
+ if len(c.writeBuf) > len(p) {
+ p = c.writeBuf
+ }
+ p = p[:0]
+
+ p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
+ p = append(p, computeAcceptKey(challengeKey)...)
+ p = append(p, "\r\n"...)
+ if c.subprotocol != "" {
+ p = append(p, "Sec-WebSocket-Protocol: "...)
+ p = append(p, c.subprotocol...)
+ p = append(p, "\r\n"...)
+ }
+ if compress {
+ p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
+ }
+ for k, vs := range responseHeader {
+ if k == "Sec-Websocket-Protocol" {
+ continue
+ }
+ for _, v := range vs {
+ p = append(p, k...)
+ p = append(p, ": "...)
+ for i := 0; i < len(v); i++ {
+ b := v[i]
+ if b <= 31 {
+ // prevent response splitting.
+ b = ' '
+ }
+ p = append(p, b)
+ }
+ p = append(p, "\r\n"...)
+ }
+ }
+ p = append(p, "\r\n"...)
+
+ // Clear deadlines set by HTTP server.
+ netConn.SetDeadline(time.Time{})
+
+ if u.HandshakeTimeout > 0 {
+ netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
+ }
+ if _, err = netConn.Write(p); err != nil {
+ netConn.Close()
+ return nil, err
+ }
+ if u.HandshakeTimeout > 0 {
+ netConn.SetWriteDeadline(time.Time{})
+ }
+
+ return c, nil
+}
+
+// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
+//
+// Deprecated: Use websocket.Upgrader instead.
+//
+// Upgrade does not perform origin checking. The application is responsible for
+// checking the Origin header before calling Upgrade. An example implementation
+// of the same origin policy check is:
+//
+// if req.Header.Get("Origin") != "http://"+req.Host {
+// http.Error(w, "Origin not allowed", http.StatusForbidden)
+// return
+// }
+//
+// If the endpoint supports subprotocols, then the application is responsible
+// for negotiating the protocol used on the connection. Use the Subprotocols()
+// function to get the subprotocols requested by the client. Use the
+// Sec-Websocket-Protocol response header to specify the subprotocol selected
+// by the application.
+//
+// The responseHeader is included in the response to the client's upgrade
+// request. Use the responseHeader to specify cookies (Set-Cookie) and the
+// negotiated subprotocol (Sec-Websocket-Protocol).
+//
+// The connection buffers IO to the underlying network connection. The
+// readBufSize and writeBufSize parameters specify the size of the buffers to
+// use. Messages can be larger than the buffers.
+//
+// If the request is not a valid WebSocket handshake, then Upgrade returns an
+// error of type HandshakeError. Applications should handle this error by
+// replying to the client with an HTTP error response.
+func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
+ u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
+ u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
+ // don't return errors to maintain backwards compatibility
+ }
+ u.CheckOrigin = func(r *http.Request) bool {
+ // allow all connections by default
+ return true
+ }
+ return u.Upgrade(w, r, responseHeader)
+}
+
+// Subprotocols returns the subprotocols requested by the client in the
+// Sec-Websocket-Protocol header.
+func Subprotocols(r *http.Request) []string {
+ h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
+ if h == "" {
+ return nil
+ }
+ protocols := strings.Split(h, ",")
+ for i := range protocols {
+ protocols[i] = strings.TrimSpace(protocols[i])
+ }
+ return protocols
+}
+
+// IsWebSocketUpgrade returns true if the client requested upgrade to the
+// WebSocket protocol.
+func IsWebSocketUpgrade(r *http.Request) bool {
+ return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
+ tokenListContainsValue(r.Header, "Upgrade", "websocket")
+}
+
+// bufioReaderSize size returns the size of a bufio.Reader.
+func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
+ // This code assumes that peek on a reset reader returns
+ // bufio.Reader.buf[:0].
+ // TODO: Use bufio.Reader.Size() after Go 1.10
+ br.Reset(originalReader)
+ if p, err := br.Peek(0); err == nil {
+ return cap(p)
+ }
+ return 0
+}
+
+// writeHook is an io.Writer that records the last slice passed to it via
+// io.Writer.Write.
+type writeHook struct {
+ p []byte
+}
+
+func (wh *writeHook) Write(p []byte) (int, error) {
+ wh.p = p
+ return len(p), nil
+}
+
+// bufioWriterBuffer grabs the buffer from a bufio.Writer.
+func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
+ // This code assumes that bufio.Writer.buf[:1] is passed to the
+ // bufio.Writer's underlying writer.
+ var wh writeHook
+ bw.Reset(&wh)
+ bw.WriteByte(0)
+ bw.Flush()
+
+ bw.Reset(originalWriter)
+
+ return wh.p[:cap(wh.p)]
+}
diff --git a/vendor/github.com/gorilla/websocket/trace.go b/vendor/github.com/gorilla/websocket/trace.go
new file mode 100644
index 0000000..834f122
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/trace.go
@@ -0,0 +1,19 @@
+// +build go1.8
+
+package websocket
+
+import (
+ "crypto/tls"
+ "net/http/httptrace"
+)
+
+func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
+ if trace.TLSHandshakeStart != nil {
+ trace.TLSHandshakeStart()
+ }
+ err := doHandshake(tlsConn, cfg)
+ if trace.TLSHandshakeDone != nil {
+ trace.TLSHandshakeDone(tlsConn.ConnectionState(), err)
+ }
+ return err
+}
diff --git a/vendor/github.com/gorilla/websocket/trace_17.go b/vendor/github.com/gorilla/websocket/trace_17.go
new file mode 100644
index 0000000..77d05a0
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/trace_17.go
@@ -0,0 +1,12 @@
+// +build !go1.8
+
+package websocket
+
+import (
+ "crypto/tls"
+ "net/http/httptrace"
+)
+
+func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
+ return doHandshake(tlsConn, cfg)
+}
diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go
new file mode 100644
index 0000000..354001e
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/util.go
@@ -0,0 +1,237 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/base64"
+ "io"
+ "net/http"
+ "strings"
+ "unicode/utf8"
+)
+
+var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
+
+func computeAcceptKey(challengeKey string) string {
+ h := sha1.New()
+ h.Write([]byte(challengeKey))
+ h.Write(keyGUID)
+ return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
+
+func generateChallengeKey() (string, error) {
+ p := make([]byte, 16)
+ if _, err := io.ReadFull(rand.Reader, p); err != nil {
+ return "", err
+ }
+ return base64.StdEncoding.EncodeToString(p), nil
+}
+
+// Octet types from RFC 2616.
+var octetTypes [256]byte
+
+const (
+ isTokenOctet = 1 << iota
+ isSpaceOctet
+)
+
+func init() {
+ // From RFC 2616
+ //
+ // OCTET =
+ // CHAR =
+ // CTL =
+ // CR =
+ // LF =
+ // SP =
+ // HT =
+ // <"> =
+ // CRLF = CR LF
+ // LWS = [CRLF] 1*( SP | HT )
+ // TEXT =
+ // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+ // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+ // token = 1*
+ // qdtext = >
+
+ for c := 0; c < 256; c++ {
+ var t byte
+ isCtl := c <= 31 || c == 127
+ isChar := 0 <= c && c <= 127
+ isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+ if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+ t |= isSpaceOctet
+ }
+ if isChar && !isCtl && !isSeparator {
+ t |= isTokenOctet
+ }
+ octetTypes[c] = t
+ }
+}
+
+func skipSpace(s string) (rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isSpaceOctet == 0 {
+ break
+ }
+ }
+ return s[i:]
+}
+
+func nextToken(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isTokenOctet == 0 {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
+func nextTokenOrQuoted(s string) (value string, rest string) {
+ if !strings.HasPrefix(s, "\"") {
+ return nextToken(s)
+ }
+ s = s[1:]
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '"':
+ return s[:i], s[i+1:]
+ case '\\':
+ p := make([]byte, len(s)-1)
+ j := copy(p, s[:i])
+ escape := true
+ for i = i + 1; i < len(s); i++ {
+ b := s[i]
+ switch {
+ case escape:
+ escape = false
+ p[j] = b
+ j++
+ case b == '\\':
+ escape = true
+ case b == '"':
+ return string(p[:j]), s[i+1:]
+ default:
+ p[j] = b
+ j++
+ }
+ }
+ return "", ""
+ }
+ }
+ return "", ""
+}
+
+// equalASCIIFold returns true if s is equal to t with ASCII case folding.
+func equalASCIIFold(s, t string) bool {
+ for s != "" && t != "" {
+ sr, size := utf8.DecodeRuneInString(s)
+ s = s[size:]
+ tr, size := utf8.DecodeRuneInString(t)
+ t = t[size:]
+ if sr == tr {
+ continue
+ }
+ if 'A' <= sr && sr <= 'Z' {
+ sr = sr + 'a' - 'A'
+ }
+ if 'A' <= tr && tr <= 'Z' {
+ tr = tr + 'a' - 'A'
+ }
+ if sr != tr {
+ return false
+ }
+ }
+ return s == t
+}
+
+// tokenListContainsValue returns true if the 1#token header with the given
+// name contains a token equal to value with ASCII case folding.
+func tokenListContainsValue(header http.Header, name string, value string) bool {
+headers:
+ for _, s := range header[name] {
+ for {
+ var t string
+ t, s = nextToken(skipSpace(s))
+ if t == "" {
+ continue headers
+ }
+ s = skipSpace(s)
+ if s != "" && s[0] != ',' {
+ continue headers
+ }
+ if equalASCIIFold(t, value) {
+ return true
+ }
+ if s == "" {
+ continue headers
+ }
+ s = s[1:]
+ }
+ }
+ return false
+}
+
+// parseExtensions parses WebSocket extensions from a header.
+func parseExtensions(header http.Header) []map[string]string {
+ // From RFC 6455:
+ //
+ // Sec-WebSocket-Extensions = extension-list
+ // extension-list = 1#extension
+ // extension = extension-token *( ";" extension-param )
+ // extension-token = registered-token
+ // registered-token = token
+ // extension-param = token [ "=" (token | quoted-string) ]
+ // ;When using the quoted-string syntax variant, the value
+ // ;after quoted-string unescaping MUST conform to the
+ // ;'token' ABNF.
+
+ var result []map[string]string
+headers:
+ for _, s := range header["Sec-Websocket-Extensions"] {
+ for {
+ var t string
+ t, s = nextToken(skipSpace(s))
+ if t == "" {
+ continue headers
+ }
+ ext := map[string]string{"": t}
+ for {
+ s = skipSpace(s)
+ if !strings.HasPrefix(s, ";") {
+ break
+ }
+ var k string
+ k, s = nextToken(skipSpace(s[1:]))
+ if k == "" {
+ continue headers
+ }
+ s = skipSpace(s)
+ var v string
+ if strings.HasPrefix(s, "=") {
+ v, s = nextTokenOrQuoted(skipSpace(s[1:]))
+ s = skipSpace(s)
+ }
+ if s != "" && s[0] != ',' && s[0] != ';' {
+ continue headers
+ }
+ ext[k] = v
+ }
+ if s != "" && s[0] != ',' {
+ continue headers
+ }
+ result = append(result, ext)
+ if s == "" {
+ continue headers
+ }
+ s = s[1:]
+ }
+ }
+ return result
+}
diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go
new file mode 100644
index 0000000..2e668f6
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/x_net_proxy.go
@@ -0,0 +1,473 @@
+// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
+//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy
+
+// Package proxy provides support for a variety of protocols to proxy network
+// data.
+//
+
+package websocket
+
+import (
+ "errors"
+ "io"
+ "net"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+type proxy_direct struct{}
+
+// Direct is a direct proxy: one that makes network connections directly.
+var proxy_Direct = proxy_direct{}
+
+// Dial makes a direct (unproxied) connection to addr.
+func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
+	return net.Dial(network, addr)
+}
+
+// A PerHost directs connections to a default Dialer unless the host name
+// requested matches one of a number of exceptions.
+type proxy_PerHost struct {
+	// def handles hosts matching no bypass rule; bypass handles the rest.
+	def, bypass proxy_Dialer
+
+	bypassNetworks []*net.IPNet
+	bypassIPs []net.IP
+	bypassZones []string
+	bypassHosts []string
+}
+
+// NewPerHost returns a PerHost Dialer that directs connections to either
+// defaultDialer or bypass, depending on whether the connection matches one of
+// the configured rules.
+func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost {
+	return &proxy_PerHost{
+		def: defaultDialer,
+		bypass: bypass,
+	}
+}
+
+// Dial connects to the address addr on the given network through either
+// defaultDialer or bypass.
+func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) {
+	host, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		return nil, err
+	}
+
+	return p.dialerForRequest(host).Dial(network, addr)
+}
+
+// dialerForRequest picks the bypass dialer when host matches a configured
+// IP, network, zone or host rule, and the default dialer otherwise.
+func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer {
+	if ip := net.ParseIP(host); ip != nil {
+		// NOTE(review): the loop variable below shadows the net
+		// package; harmless here, but renaming it would be clearer.
+		for _, net := range p.bypassNetworks {
+			if net.Contains(ip) {
+				return p.bypass
+			}
+		}
+		for _, bypassIP := range p.bypassIPs {
+			if bypassIP.Equal(ip) {
+				return p.bypass
+			}
+		}
+		return p.def
+	}
+
+	for _, zone := range p.bypassZones {
+		if strings.HasSuffix(host, zone) {
+			return p.bypass
+		}
+		if host == zone[1:] {
+			// For a zone ".example.com", we match "example.com"
+			// too.
+			return p.bypass
+		}
+	}
+	for _, bypassHost := range p.bypassHosts {
+		if bypassHost == host {
+			return p.bypass
+		}
+	}
+	return p.def
+}
+
+// AddFromString parses a string that contains comma-separated values
+// specifying hosts that should use the bypass proxy. Each value is either an
+// IP address, a CIDR range, a zone (*.example.com) or a host name
+// (localhost). A best effort is made to parse the string and errors are
+// ignored.
+func (p *proxy_PerHost) AddFromString(s string) {
+	hosts := strings.Split(s, ",")
+	for _, host := range hosts {
+		host = strings.TrimSpace(host)
+		if len(host) == 0 {
+			continue
+		}
+		if strings.Contains(host, "/") {
+			// We assume that it's a CIDR address like 127.0.0.0/8
+			if _, net, err := net.ParseCIDR(host); err == nil {
+				p.AddNetwork(net)
+			}
+			continue
+		}
+		if ip := net.ParseIP(host); ip != nil {
+			p.AddIP(ip)
+			continue
+		}
+		if strings.HasPrefix(host, "*.") {
+			// Drop the "*" and register ".example.com" as a zone.
+			p.AddZone(host[1:])
+			continue
+		}
+		p.AddHost(host)
+	}
+}
+
+// AddIP specifies an IP address that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match an IP.
+func (p *proxy_PerHost) AddIP(ip net.IP) {
+	p.bypassIPs = append(p.bypassIPs, ip)
+}
+
+// AddNetwork specifies an IP range that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match.
+func (p *proxy_PerHost) AddNetwork(net *net.IPNet) {
+	p.bypassNetworks = append(p.bypassNetworks, net)
+}
+
+// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
+// "example.com" matches "example.com" and all of its subdomains.
+func (p *proxy_PerHost) AddZone(zone string) {
+	// Normalize to a leading-dot, no-trailing-dot form (".example.com").
+	if strings.HasSuffix(zone, ".") {
+		zone = zone[:len(zone)-1]
+	}
+	if !strings.HasPrefix(zone, ".") {
+		zone = "." + zone
+	}
+	p.bypassZones = append(p.bypassZones, zone)
+}
+
+// AddHost specifies a host name that will use the bypass proxy.
+// A trailing dot is stripped before the host is recorded.
+func (p *proxy_PerHost) AddHost(host string) {
+	if strings.HasSuffix(host, ".") {
+		host = host[:len(host)-1]
+	}
+	p.bypassHosts = append(p.bypassHosts, host)
+}
+
+// A Dialer is a means to establish a connection.
+type proxy_Dialer interface {
+	// Dial connects to the given address via the proxy.
+	Dial(network, addr string) (c net.Conn, err error)
+}
+
+// Auth contains authentication parameters that specific Dialers may require.
+type proxy_Auth struct {
+	User, Password string
+}
+
+// FromEnvironment returns the dialer specified by the proxy related variables in
+// the environment.
+// NOTE(review): a malformed ALL_PROXY value silently falls back to a
+// direct dialer.
+func proxy_FromEnvironment() proxy_Dialer {
+	allProxy := proxy_allProxyEnv.Get()
+	if len(allProxy) == 0 {
+		return proxy_Direct
+	}
+
+	proxyURL, err := url.Parse(allProxy)
+	if err != nil {
+		return proxy_Direct
+	}
+	proxy, err := proxy_FromURL(proxyURL, proxy_Direct)
+	if err != nil {
+		return proxy_Direct
+	}
+
+	noProxy := proxy_noProxyEnv.Get()
+	if len(noProxy) == 0 {
+		return proxy
+	}
+
+	// Route NO_PROXY entries around the proxy via a direct dialer.
+	perHost := proxy_NewPerHost(proxy, proxy_Direct)
+	perHost.AddFromString(noProxy)
+	return perHost
+}
+
+// proxySchemes is a map from URL schemes to a function that creates a Dialer
+// from a URL with such a scheme.
+var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)
+
+// RegisterDialerType takes a URL scheme and a function to generate Dialers from
+// a URL with that scheme and a forwarding Dialer. Registered schemes are used
+// by FromURL.
+func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
+	if proxy_proxySchemes == nil {
+		proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
+	}
+	proxy_proxySchemes[scheme] = f
+}
+
+// FromURL returns a Dialer given a URL specification and an underlying
+// Dialer for it to make network requests.
+func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
+	var auth *proxy_Auth
+	if u.User != nil {
+		auth = new(proxy_Auth)
+		auth.User = u.User.Username()
+		if p, ok := u.User.Password(); ok {
+			auth.Password = p
+		}
+	}
+
+	switch u.Scheme {
+	case "socks5":
+		return proxy_SOCKS5("tcp", u.Host, auth, forward)
+	}
+
+	// If the scheme doesn't match any of the built-in schemes, see if it
+	// was registered by another package.
+	if proxy_proxySchemes != nil {
+		if f, ok := proxy_proxySchemes[u.Scheme]; ok {
+			return f(u, forward)
+		}
+	}
+
+	return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
+}
+
+var (
+	proxy_allProxyEnv = &proxy_envOnce{
+		names: []string{"ALL_PROXY", "all_proxy"},
+	}
+	proxy_noProxyEnv = &proxy_envOnce{
+		names: []string{"NO_PROXY", "no_proxy"},
+	}
+)
+
+// envOnce looks up an environment variable (optionally by multiple
+// names) once. It mitigates expensive lookups on some platforms
+// (e.g. Windows).
+// (Borrowed from net/http/transport.go)
+type proxy_envOnce struct {
+	names []string
+	once sync.Once
+	val string
+}
+
+// Get returns the first non-empty value among the candidate names,
+// computed once and cached.
+func (e *proxy_envOnce) Get() string {
+	e.once.Do(e.init)
+	return e.val
+}
+
+func (e *proxy_envOnce) init() {
+	for _, n := range e.names {
+		e.val = os.Getenv(n)
+		if e.val != "" {
+			return
+		}
+	}
+}
+
+// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
+// with an optional username and password. See RFC 1928 and RFC 1929.
+// The proxy is not contacted here; errors surface on the first Dial.
+func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
+	s := &proxy_socks5{
+		network: network,
+		addr: addr,
+		forward: forward,
+	}
+	if auth != nil {
+		s.user = auth.User
+		s.password = auth.Password
+	}
+
+	return s, nil
+}
+
+// proxy_socks5 dials through a SOCKS5 proxy at addr, reached via forward.
+type proxy_socks5 struct {
+	user, password string
+	network, addr string
+	forward proxy_Dialer
+}
+
+const proxy_socks5Version = 5
+
+// Authentication methods (RFC 1928 section 3).
+const (
+	proxy_socks5AuthNone = 0
+	proxy_socks5AuthPassword = 2
+)
+
+const proxy_socks5Connect = 1
+
+// Address types (RFC 1928 section 5).
+const (
+	proxy_socks5IP4 = 1
+	proxy_socks5Domain = 3
+	proxy_socks5IP6 = 4
+)
+
+// proxy_socks5Errors maps SOCKS5 reply codes to messages (RFC 1928 section 6);
+// index 0 (empty string) means success.
+var proxy_socks5Errors = []string{
+	"",
+	"general failure",
+	"connection forbidden",
+	"network unreachable",
+	"host unreachable",
+	"connection refused",
+	"TTL expired",
+	"command not supported",
+	"address type not supported",
+}
+
+// Dial connects to the address addr on the given network via the SOCKS5 proxy.
+func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
+	switch network {
+	case "tcp", "tcp6", "tcp4":
+	default:
+		return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
+	}
+
+	conn, err := s.forward.Dial(s.network, s.addr)
+	if err != nil {
+		return nil, err
+	}
+	// Close the underlying connection if the handshake fails.
+	if err := s.connect(conn, addr); err != nil {
+		conn.Close()
+		return nil, err
+	}
+	return conn, nil
+}
+
+// connect takes an existing connection to a socks5 proxy server,
+// and commands the server to extend that connection to target,
+// which must be a canonical address with a host and port.
+func (s *proxy_socks5) connect(conn net.Conn, target string) error {
+	host, portStr, err := net.SplitHostPort(target)
+	if err != nil {
+		return err
+	}
+
+	port, err := strconv.Atoi(portStr)
+	if err != nil {
+		return errors.New("proxy: failed to parse port number: " + portStr)
+	}
+	if port < 1 || port > 0xffff {
+		return errors.New("proxy: port number out of range: " + portStr)
+	}
+
+	// the size here is just an estimate
+	buf := make([]byte, 0, 6+len(host))
+
+	// Greeting: advertise the authentication methods we support.
+	buf = append(buf, proxy_socks5Version)
+	if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
+		buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
+	} else {
+		buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
+	}
+
+	if _, err := conn.Write(buf); err != nil {
+		return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+	}
+
+	if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+		return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+	}
+	if buf[0] != 5 {
+		return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
+	}
+	if buf[1] == 0xff {
+		return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
+	}
+
+	// See RFC 1929
+	if buf[1] == proxy_socks5AuthPassword {
+		buf = buf[:0]
+		buf = append(buf, 1 /* password protocol version */)
+		buf = append(buf, uint8(len(s.user)))
+		buf = append(buf, s.user...)
+		buf = append(buf, uint8(len(s.password)))
+		buf = append(buf, s.password...)
+
+		if _, err := conn.Write(buf); err != nil {
+			return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+		}
+
+		if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+			return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+		}
+
+		if buf[1] != 0 {
+			return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
+		}
+	}
+
+	// CONNECT request: version, command, reserved, address type, address,
+	// and finally the big-endian port.
+	buf = buf[:0]
+	buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)
+
+	if ip := net.ParseIP(host); ip != nil {
+		if ip4 := ip.To4(); ip4 != nil {
+			buf = append(buf, proxy_socks5IP4)
+			ip = ip4
+		} else {
+			buf = append(buf, proxy_socks5IP6)
+		}
+		buf = append(buf, ip...)
+	} else {
+		if len(host) > 255 {
+			return errors.New("proxy: destination host name too long: " + host)
+		}
+		buf = append(buf, proxy_socks5Domain)
+		buf = append(buf, byte(len(host)))
+		buf = append(buf, host...)
+	}
+	buf = append(buf, byte(port>>8), byte(port))
+
+	if _, err := conn.Write(buf); err != nil {
+		return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+	}
+
+	if _, err := io.ReadFull(conn, buf[:4]); err != nil {
+		return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+	}
+
+	failure := "unknown error"
+	if int(buf[1]) < len(proxy_socks5Errors) {
+		failure = proxy_socks5Errors[buf[1]]
+	}
+
+	if len(failure) > 0 {
+		return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
+	}
+
+	// Discard the bound address and port echoed in the server reply.
+	bytesToDiscard := 0
+	switch buf[3] {
+	case proxy_socks5IP4:
+		bytesToDiscard = net.IPv4len
+	case proxy_socks5IP6:
+		bytesToDiscard = net.IPv6len
+	case proxy_socks5Domain:
+		_, err := io.ReadFull(conn, buf[:1])
+		if err != nil {
+			return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+		}
+		bytesToDiscard = int(buf[0])
+	default:
+		return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
+	}
+
+	if cap(buf) < bytesToDiscard {
+		buf = make([]byte, bytesToDiscard)
+	} else {
+		buf = buf[:bytesToDiscard]
+	}
+	if _, err := io.ReadFull(conn, buf); err != nil {
+		return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+	}
+
+	// Also need to discard the port number
+	if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+		return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/henrylee2cn/goutil/.gitignore b/vendor/github.com/henrylee2cn/goutil/.gitignore
new file mode 100644
index 0000000..87b70ba
--- /dev/null
+++ b/vendor/github.com/henrylee2cn/goutil/.gitignore
@@ -0,0 +1,39 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+*.o
+*.a
+_obj
+_test
+*.[568vq]
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+_testmain.go
+*.prof
+*.rar
+*.zip
+*.gz
+*.psd
+*.bmd
+*.cfg
+*.pptx
+*.log
+*.sublime-project
+*.sublime-workspace
+.DS_Store
diff --git a/vendor/github.com/henrylee2cn/goutil/README.md b/vendor/github.com/henrylee2cn/goutil/README.md
new file mode 100644
index 0000000..0d02b48
--- /dev/null
+++ b/vendor/github.com/henrylee2cn/goutil/README.md
@@ -0,0 +1,27 @@
+# goutil [![report card](https://goreportcard.com/badge/github.com/henrylee2cn/goutil?style=flat-square)](http://goreportcard.com/report/henrylee2cn/goutil) [![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](http://godoc.org/github.com/henrylee2cn/goutil)
+
+Common and useful utils for the Go project development.
+
+## 1. Inclusion criteria
+
+- Only rely on the Go standard package
+- Functions or lightweight packages
+- Non-business related general tools
+
+## 2. Contents
+
+- [Tools](#) Some useful small functions.
+- [BitSet](bitset) A bit set
+- [Calendar](calendar) Chinese Lunar Calendar, Solar Calendar and cron time rules
+- [Cmder](cmder) Cmder executes commands and captures their results
+- [CoarseTime](coarsetime) Current time truncated to the nearest 100ms
+- [Errors](errors) Improved errors package.
+- [Graceful](graceful) Shutdown or reboot current process gracefully
+- [HTTPBody](httpbody) HTTP body builder
+- [Password](password) Check password
+- [GoPool](pool) Goroutines' pool
+- [ResPool](pool) Resources' pool
+- [Workshop](pool) Non-blocking asynchronous multiplex resource pool
+- [Status](status) A handling status with code, msg, cause and stack
+- [Tpack](tpack) Go underlying type data
+- [Versioning](versioning) Version comparison tool that conforms to semantic version 2.0.0
diff --git a/vendor/github.com/henrylee2cn/goutil/currip.go b/vendor/github.com/henrylee2cn/goutil/currip.go
new file mode 100644
index 0000000..5bfdea8
--- /dev/null
+++ b/vendor/github.com/henrylee2cn/goutil/currip.go
@@ -0,0 +1,75 @@
+package goutil
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+)
+
+// ExtranetIP get external IP addr.
+// NOTE: Query IP information from the service API: http://pv.sohu.com/cityjson?ie=utf-8
+// NOTE(review): the response is scraped with bytes.Index; if the marker
+// `"cip": "` is absent, idx is -1 and the slice expression panics — the
+// deferred recover converts that panic into an error. http.Get also uses
+// the default client, which has no timeout.
+func ExtranetIP() (ip string, err error) {
+	// Convert both panics and plain errors into a uniform error message.
+	defer func() {
+		if p := recover(); p != nil {
+			err = fmt.Errorf("Get external IP error: %v", p)
+		} else if err != nil {
+			err = errors.New("Get external IP error: " + err.Error())
+		}
+	}()
+	resp, err := http.Get("http://pv.sohu.com/cityjson?ie=utf-8")
+	if err != nil {
+		return
+	}
+	b, err := ioutil.ReadAll(resp.Body)
+	resp.Body.Close()
+	if err != nil {
+		return
+	}
+	// Extract the value following `"cip": "` up to the closing quote.
+	idx := bytes.Index(b, []byte(`"cip": "`))
+	b = b[idx+len(`"cip": "`):]
+	idx = bytes.Index(b, []byte(`"`))
+	b = b[:idx]
+	ip = string(b)
+	return
+}
+
+// IntranetIP get internal IP addr.
+// It returns the first IPv4 address found on an interface that is up and
+// not a loopback.
+func IntranetIP() (string, error) {
+	ifaces, err := net.Interfaces()
+	if err != nil {
+		return "", err
+	}
+	for _, iface := range ifaces {
+		if iface.Flags&net.FlagUp == 0 {
+			continue // interface down
+		}
+		if iface.Flags&net.FlagLoopback != 0 {
+			continue // loopback interface
+		}
+		addrs, err := iface.Addrs()
+		if err != nil {
+			return "", err
+		}
+		for _, addr := range addrs {
+			var ip net.IP
+			switch v := addr.(type) {
+			case *net.IPNet:
+				ip = v.IP
+			case *net.IPAddr:
+				ip = v.IP
+			}
+			if ip == nil || ip.IsLoopback() {
+				continue
+			}
+			ip = ip.To4()
+			if ip == nil {
+				continue // not an ipv4 address
+			}
+			return ip.String(), nil
+		}
+	}
+	return "", errors.New("Are you connected to the network?")
+}
diff --git a/vendor/github.com/henrylee2cn/goutil/doc.go b/vendor/github.com/henrylee2cn/goutil/doc.go
new file mode 100644
index 0000000..db00e1f
--- /dev/null
+++ b/vendor/github.com/henrylee2cn/goutil/doc.go
@@ -0,0 +1,7 @@
+// Common and useful utils for the Go project development.
+//
+// Inclusion criteria:
+// - Only rely on the Go standard package
+// - Functions or lightweight packages
+// - Non-business related general tools
+package goutil
diff --git a/vendor/github.com/henrylee2cn/goutil/encrypt.go b/vendor/github.com/henrylee2cn/goutil/encrypt.go
new file mode 100644
index 0000000..d0ce8cb
--- /dev/null
+++ b/vendor/github.com/henrylee2cn/goutil/encrypt.go
@@ -0,0 +1,91 @@
+package goutil
+
+import (
+ "crypto/aes"
+ "crypto/md5"
+ "encoding/hex"
+ "errors"
+)
+
+// Md5 returns the MD5 checksum string of the data.
+func Md5(b []byte) string {
+	checksum := md5.Sum(b)
+	return hex.EncodeToString(checksum[:])
+}
+
+// AESEncrypt encrypts a piece of data.
+// The cipherkey argument should be the AES key,
+// either 16, 24, or 32 bytes to select
+// AES-128, AES-192, or AES-256.
+// The result is hex-encoded; the function panics on an invalid key length.
+// NOTE(review): blocks are encrypted independently (ECB-style), so equal
+// plaintext blocks produce equal ciphertext blocks — prefer an
+// authenticated mode such as GCM for new code.
+func AESEncrypt(cipherkey, src []byte) []byte {
+	block, err := aes.NewCipher(cipherkey)
+	if err != nil {
+		panic(err)
+	}
+	bs := block.BlockSize()
+	src = padData(src, bs)
+	r := make([]byte, len(src))
+	dst := r
+	// Encrypt each block in place, advancing both windows.
+	for len(src) > 0 {
+		block.Encrypt(dst, src)
+		src = src[bs:]
+		dst = dst[bs:]
+	}
+	dst = make([]byte, hex.EncodedLen(len(r)))
+	hex.Encode(dst, r)
+	return dst
+}
+
+// AESDecrypt decrypts a piece of data.
+// The cipherkey argument should be the AES key,
+// either 16, 24, or 32 bytes to select
+// AES-128, AES-192, or AES-256.
+// The input must be hex-encoded, as produced by AESEncrypt.
+func AESDecrypt(cipherkey, ciphertext []byte) ([]byte, error) {
+	block, err := aes.NewCipher(cipherkey)
+	if err != nil {
+		return nil, err
+	}
+	src := make([]byte, hex.DecodedLen(len(ciphertext)))
+	_, err = hex.Decode(src, ciphertext)
+	if err != nil {
+		return nil, err
+	}
+	bs := block.BlockSize()
+	r := make([]byte, len(src))
+	dst := r
+	for len(src) > 0 {
+		block.Decrypt(dst, src)
+		src = src[bs:]
+		dst = dst[bs:]
+	}
+	return removePad(r)
+}
+
+// padData appends PKCS#7-style padding so len(d) becomes a multiple of bs;
+// at least one padding byte is always added.
+func padData(d []byte, bs int) []byte {
+	padedSize := ((len(d) / bs) + 1) * bs
+	pad := padedSize - len(d)
+	for i := len(d); i < padedSize; i++ {
+		d = append(d, byte(pad))
+	}
+	return d
+}
+
+// removePad strips the PKCS#7-style padding appended by padData.
+// NOTE(review): the input is assumed well-formed — a corrupt final byte
+// larger than len(r) makes r[l-last:l] panic, and a final byte of 0
+// removes nothing without reporting an error.
+func removePad(r []byte) ([]byte, error) {
+	l := len(r)
+	if l == 0 {
+		return []byte{}, errors.New("input []byte is empty")
+	}
+	last := int(r[l-1])
+	pad := r[l-last : l]
+	isPad := true
+	for _, v := range pad {
+		if int(v) != last {
+			isPad = false
+			break
+		}
+	}
+	if !isPad {
+		return r, errors.New("remove pad error")
+	}
+	return r[:l-last], nil
+}
diff --git a/vendor/github.com/henrylee2cn/goutil/errors/errors.go b/vendor/github.com/henrylee2cn/goutil/errors/errors.go
new file mode 100644
index 0000000..fa0ef74
--- /dev/null
+++ b/vendor/github.com/henrylee2cn/goutil/errors/errors.go
@@ -0,0 +1,96 @@
+// Package errors is an improved errors package with support for merging
+// multiple errors into one.
+package errors
+
+import (
+	"bytes"
+	"fmt"
+	"strconv"
+
+	"github.com/henrylee2cn/goutil"
+)
+
+// New returns an error that formats as the given text.
+func New(text string) error {
+	return &myerror{text}
+}
+
+// myerror is a trivial implementation of error.
+type myerror struct {
+	s string
+}
+
+func (e *myerror) Error() string {
+	return e.s
+}
+
+// Errorf formats according to a format specifier and returns the string
+// as a value that satisfies error.
+func Errorf(format string, a ...interface{}) error {
+	return New(fmt.Sprintf(format, a...))
+}
+
+// Merge merges multiple errors.
+func Merge(errs ...error) error {
+	return Append(nil, errs...)
+}
+
+// Append appends multiple errors to the error.
+// nil entries are skipped and nested multiErrors are flattened; the
+// result is nil when nothing remains.
+func Append(err error, errs ...error) error {
+	count := len(errs)
+	if count == 0 {
+		return err
+	}
+	var merged []error
+	if err != nil {
+		if e, ok := err.(*multiError); ok {
+			// Copy the existing list so the original is not mutated.
+			_count := len(e.errs)
+			merged = make([]error, _count, count+_count)
+			copy(merged, e.errs)
+		} else {
+			merged = make([]error, 1, count+1)
+			merged[0] = err
+		}
+	}
+	for _, err := range errs {
+		switch e := err.(type) {
+		case nil:
+			continue
+		case *multiError:
+			merged = append(merged, e.errs...)
+		default:
+			merged = append(merged, e)
+		}
+	}
+	if len(merged) == 0 {
+		return nil
+	}
+	return &multiError{
+		errs: merged,
+	}
+}
+
+// multiError merged multiple errors
+type multiError struct {
+	errs []error
+	text string
+}
+
+// mergePrefix the multiple errors prefix
+var mergePrefix = []byte("MultiError:\n")
+
+// Error implement error interface.
+// NOTE(review): the rendered text is cached in m.text without
+// synchronization, so concurrent first calls race on the field.
+func (m *multiError) Error() string {
+	if len(m.text) > 0 {
+		return m.text
+	}
+	// 56 is just an initial capacity guess to limit regrowth.
+	var bText = make([]byte, len(mergePrefix), 56)
+	copy(bText, mergePrefix)
+	for i, err := range m.errs {
+		bText = append(bText, strconv.Itoa(i+1)...)
+		bText = append(bText, ". "...)
+		bText = append(bText, bytes.Trim(goutil.StringToBytes(err.Error()), "\n")...)
+		bText = append(bText, '\n')
+	}
+	m.text = goutil.BytesToString(bText)
+	return m.text
+}
diff --git a/vendor/github.com/henrylee2cn/goutil/exported.go b/vendor/github.com/henrylee2cn/goutil/exported.go
new file mode 100644
index 0000000..c9ae69f
--- /dev/null
+++ b/vendor/github.com/henrylee2cn/goutil/exported.go
@@ -0,0 +1,34 @@
+package goutil
+
+import (
+ "reflect"
+ "runtime"
+ "unicode"
+ "unicode/utf8"
+)
+
+// IsExportedOrBuiltinType is this type exported or a builtin?
+func IsExportedOrBuiltinType(t reflect.Type) bool {
+	// Unwrap pointers down to the element type first.
+	for t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+	// PkgPath will be non-empty even for an exported type,
+	// so we need to check the type name as well.
+	return IsExportedName(t.Name()) || t.PkgPath() == ""
+}
+
+// IsExportedName is this an exported - upper case - name?
+func IsExportedName(name string) bool {
+	// NOTE(review): the local variable shadows the builtin type `rune`.
+	rune, _ := utf8.DecodeRuneInString(name)
+	return unicode.IsUpper(rune)
+}
+
+// ObjectName gets the type name of the object.
+// For functions it returns the runtime (import-path qualified) name.
+func ObjectName(obj interface{}) string {
+	v := reflect.ValueOf(obj)
+	t := v.Type()
+	if t.Kind() == reflect.Func {
+		return runtime.FuncForPC(v.Pointer()).Name()
+	}
+	return t.String()
+}
diff --git a/vendor/github.com/henrylee2cn/goutil/file.go b/vendor/github.com/henrylee2cn/goutil/file.go
new file mode 100644
index 0000000..1f604ca
--- /dev/null
+++ b/vendor/github.com/henrylee2cn/goutil/file.go
@@ -0,0 +1,417 @@
+package goutil
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+)
+
+// SelfPath gets compiled executable file absolute path.
+func SelfPath() string {
+	path, _ := filepath.Abs(os.Args[0])
+	return path
+}
+
+// SelfDir gets compiled executable file directory.
+func SelfDir() string {
+	return filepath.Dir(SelfPath())
+}
+
+// RelPath gets relative path.
+// The result always uses forward slashes; errors are ignored.
+func RelPath(targpath string) string {
+	basepath, _ := filepath.Abs("./")
+	rel, _ := filepath.Rel(basepath, targpath)
+	return strings.Replace(rel, `\`, `/`, -1)
+}
+
+// curpath is captured once at package init, before any os.Chdir.
+var curpath = SelfDir()
+
+// SelfChdir switch the working path to my own path.
+func SelfChdir() {
+	if err := os.Chdir(curpath); err != nil {
+		log.Fatal(err)
+	}
+}
+
+// FileExists reports whether the named file or directory exists.
+func FileExists(name string) (existed bool) {
+	existed, _ = FileExist(name)
+	return
+}
+
+// FileExist reports whether the named file or directory exists.
+// NOTE(review): on a stat error other than "not exist" (e.g. permission
+// denied) it reports existed=true with isDir=false.
+func FileExist(name string) (existed bool, isDir bool) {
+	info, err := os.Stat(name)
+	if err != nil {
+		return !os.IsNotExist(err), false
+	}
+	return true, info.IsDir()
+}
+
+// SearchFile Search a file in paths.
+// this is often used in search config file in /etc ~/
+// On failure the returned error names the last path tried.
+func SearchFile(filename string, paths ...string) (fullpath string, err error) {
+	for _, path := range paths {
+		fullpath = filepath.Join(path, filename)
+		existed, _ := FileExist(fullpath)
+		if existed {
+			return
+		}
+	}
+	err = errors.New(fullpath + " not found in paths")
+	return
+}
+
+// GrepFile like command grep -E
+// for example: GrepFile(`^hello`, "hello.txt")
+// \n is striped while read
+// NOTE(review): the opened file is never closed, so the descriptor leaks
+// until garbage collection — a deferred fd.Close() is missing.
+func GrepFile(patten string, filename string) (lines []string, err error) {
+	re, err := regexp.Compile(patten)
+	if err != nil {
+		return
+	}
+
+	fd, err := os.Open(filename)
+	if err != nil {
+		return
+	}
+	lines = make([]string, 0)
+	reader := bufio.NewReader(fd)
+	prefix := ""
+	isLongLine := false
+	for {
+		byteLine, isPrefix, er := reader.ReadLine()
+		if er != nil && er != io.EOF {
+			return nil, er
+		}
+		if er == io.EOF {
+			break
+		}
+		line := string(byteLine)
+		if isPrefix {
+			// Line exceeds the reader buffer: accumulate and continue.
+			prefix += line
+			continue
+		} else {
+			isLongLine = true
+		}
+
+		line = prefix + line
+		if isLongLine {
+			prefix = ""
+		}
+		if re.MatchString(line) {
+			lines = append(lines, line)
+		}
+	}
+	return lines, nil
+}
+
+// WalkDirs traverses the directory, return to the relative path.
+// You can specify the suffix.
+// Only directories are collected; results are relative paths via RelPath.
+func WalkDirs(targpath string, suffixes ...string) (dirlist []string) {
+	if !filepath.IsAbs(targpath) {
+		targpath, _ = filepath.Abs(targpath)
+	}
+	err := filepath.Walk(targpath, func(retpath string, f os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if !f.IsDir() {
+			return nil
+		}
+		if len(suffixes) == 0 {
+			dirlist = append(dirlist, RelPath(retpath))
+			return nil
+		}
+		_retpath := RelPath(retpath)
+		for _, suffix := range suffixes {
+			if strings.HasSuffix(_retpath, suffix) {
+				dirlist = append(dirlist, _retpath)
+			}
+		}
+		return nil
+	})
+
+	if err != nil {
+		log.Printf("utils.WalkRelDirs: %v\n", err)
+		return
+	}
+
+	return
+}
+
+// FilepathSplitExt splits the filename into a pair (root, ext) such that root + ext == filename,
+// and ext is empty or begins with a period and contains at most one period.
+// Leading periods on the basename are ignored; splitext('.cshrc') returns ('', '.cshrc').
+func FilepathSplitExt(filename string, slashInsensitive ...bool) (root, ext string) {
+	insensitive := false
+	if len(slashInsensitive) > 0 {
+		insensitive = slashInsensitive[0]
+	}
+	if insensitive {
+		filename = FilepathSlashInsensitive(filename)
+	}
+	// Scan backwards within the final path element only.
+	for i := len(filename) - 1; i >= 0 && !os.IsPathSeparator(filename[i]); i-- {
+		if filename[i] == '.' {
+			return filename[:i], filename[i:]
+		}
+	}
+	return filename, ""
+}
+
+// FilepathStem returns the stem of filename.
+// Example:
+// FilepathStem("/root/dir/sub/file.ext") // output "file"
+// NOTE:
+// If slashInsensitive is empty, default is false.
+func FilepathStem(filename string, slashInsensitive ...bool) string {
+	insensitive := false
+	if len(slashInsensitive) > 0 {
+		insensitive = slashInsensitive[0]
+	}
+	if insensitive {
+		filename = FilepathSlashInsensitive(filename)
+	}
+	base := filepath.Base(filename)
+	// Trim everything from the last dot, if present.
+	for i := len(base) - 1; i >= 0; i-- {
+		if base[i] == '.' {
+			return base[:i]
+		}
+	}
+	return base
+}
+
+// FilepathSlashInsensitive ignore the difference between the slash and the backslash,
+// and convert to the same as the current system.
+func FilepathSlashInsensitive(path string) string {
+	if filepath.Separator == '/' {
+		return strings.Replace(path, "\\", "/", -1)
+	}
+	return strings.Replace(path, "/", "\\", -1)
+}
+
+// FilepathContains checks if the basepath path contains the subpaths.
+// NOTE(review): the error text "is not include" reads oddly, but it is a
+// runtime string and is left unchanged here.
+func FilepathContains(basepath string, subpaths []string) error {
+	basepath, err := filepath.Abs(basepath)
+	if err != nil {
+		return err
+	}
+	for _, p := range subpaths {
+		p, err = filepath.Abs(p)
+		if err != nil {
+			return err
+		}
+		rel, err := filepath.Rel(basepath, p)
+		if err != nil {
+			return err
+		}
+		// A relative path starting with ".." escapes basepath.
+		if strings.HasPrefix(rel, "..") {
+			return fmt.Errorf("%s is not include %s", basepath, p)
+		}
+	}
+	return nil
+}
+
+// FilepathAbsolute returns the absolute paths.
+func FilepathAbsolute(paths []string) ([]string, error) {
+	return StringsConvert(paths, func(p string) (string, error) {
+		return filepath.Abs(p)
+	})
+}
+
+// FilepathAbsoluteMap returns the absolute paths map.
+func FilepathAbsoluteMap(paths []string) (map[string]string, error) {
+	return StringsConvertMap(paths, func(p string) (string, error) {
+		return filepath.Abs(p)
+	})
+}
+
+// FilepathRelative returns the relative paths.
+func FilepathRelative(basepath string, targpaths []string) ([]string, error) {
+	basepath, err := filepath.Abs(basepath)
+	if err != nil {
+		return nil, err
+	}
+	return StringsConvert(targpaths, func(p string) (string, error) {
+		return filepathRelative(basepath, p)
+	})
+}
+
+// FilepathRelativeMap returns the relative paths map.
+func FilepathRelativeMap(basepath string, targpaths []string) (map[string]string, error) {
+	basepath, err := filepath.Abs(basepath)
+	if err != nil {
+		return nil, err
+	}
+	return StringsConvertMap(targpaths, func(p string) (string, error) {
+		return filepathRelative(basepath, p)
+	})
+}
+
+// filepathRelative resolves targpath relative to basepath, failing when
+// targpath lies outside basepath.
+func filepathRelative(basepath, targpath string) (string, error) {
+	abs, err := filepath.Abs(targpath)
+	if err != nil {
+		return "", err
+	}
+	rel, err := filepath.Rel(basepath, abs)
+	if err != nil {
+		return "", err
+	}
+	if strings.HasPrefix(rel, "..") {
+		return "", fmt.Errorf("%s is not include %s", basepath, abs)
+	}
+	return rel, nil
+}
+
+// FilepathDistinct removes the same path and return in the original order.
+// If toAbs is true, return the result to absolute paths.
+// Duplicates are detected by comparing absolute forms.
+func FilepathDistinct(paths []string, toAbs bool) ([]string, error) {
+	m := make(map[string]bool, len(paths))
+	ret := make([]string, 0, len(paths))
+	for _, p := range paths {
+		abs, err := filepath.Abs(p)
+		if err != nil {
+			return nil, err
+		}
+		if m[abs] {
+			continue
+		}
+		m[abs] = true
+		if toAbs {
+			ret = append(ret, abs)
+		} else {
+			ret = append(ret, p)
+		}
+	}
+	return ret, nil
+}
+
+// FilepathToSlash returns the result of replacing each separator character
+// in path with a slash ('/') character. Multiple separators are
+// replaced by multiple slashes.
+func FilepathToSlash(paths []string) []string {
+	ret, _ := StringsConvert(paths, func(p string) (string, error) {
+		return filepath.ToSlash(p), nil
+	})
+	return ret
+}
+
+// FilepathFromSlash returns the result of replacing each slash ('/') character
+// in path with a separator character. Multiple slashes are replaced
+// by multiple separators.
+func FilepathFromSlash(paths []string) []string {
+	ret, _ := StringsConvert(paths, func(p string) (string, error) {
+		return filepath.FromSlash(p), nil
+	})
+	return ret
+}
+
+// FilepathSame checks if the two paths are the same.
+// Comparison is by absolute form after a fast literal-equality check.
+func FilepathSame(path1, path2 string) (bool, error) {
+	if path1 == path2 {
+		return true, nil
+	}
+	p1, err := filepath.Abs(path1)
+	if err != nil {
+		return false, err
+	}
+	p2, err := filepath.Abs(path2)
+	if err != nil {
+		return false, err
+	}
+	return p1 == p2, nil
+}
+
+// MkdirAll creates a directory named path,
+// along with any necessary parents, and returns nil,
+// or else returns an error.
+// The permission bits perm (before umask) are used for all
+// directories that MkdirAll creates.
+// If path is already a directory, MkdirAll does nothing
+// and returns nil.
+// If perm is empty, default use 0755.
+func MkdirAll(path string, perm ...os.FileMode) error {
+	var fm os.FileMode = 0755
+	if len(perm) > 0 {
+		fm = perm[0]
+	}
+	return os.MkdirAll(path, fm)
+}
+
+// WriteFile writes file, and automatically creates the directory if necessary.
+// NOTE:
+// If perm is empty, automatically determine the file permissions based on extension.
+func WriteFile(filename string, data []byte, perm ...os.FileMode) error {
+	filename = filepath.FromSlash(filename)
+	err := MkdirAll(filepath.Dir(filename))
+	if err != nil {
+		return err
+	}
+	if len(perm) > 0 {
+		return ioutil.WriteFile(filename, data, perm[0])
+	}
+	var ext string
+	if idx := strings.LastIndex(filename, "."); idx != -1 {
+		ext = filename[idx:]
+	}
+	// Executable/script extensions get 0755; everything else 0644.
+	switch ext {
+	case ".sh", ".py", ".rb", ".bat", ".com", ".vbs", ".htm", ".run", ".App", ".exe", ".reg":
+		return ioutil.WriteFile(filename, data, 0755)
+	default:
+		return ioutil.WriteFile(filename, data, 0644)
+	}
+}
+
+// RewriteFile rewrites the file.
+// The file content is passed to fn and the returned bytes replace it.
+// NOTE(review): errors from Seek and Truncate are silently ignored.
+func RewriteFile(filename string, fn func(content []byte) (newContent []byte, err error)) error {
+	f, err := os.OpenFile(filename, os.O_RDWR, 0777)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	cnt, err := ioutil.ReadAll(f)
+	if err != nil {
+		return err
+	}
+	newContent, err := fn(cnt)
+	if err != nil {
+		return err
+	}
+	f.Seek(0, 0)
+	f.Truncate(0)
+	_, err = f.Write(newContent)
+	return err
+}
+
+// RewriteToFile rewrites the file to newfilename.
+// If newfilename already exists and is not a directory, replaces it.
+// The new file inherits the source file's permission bits.
+func RewriteToFile(filename, newfilename string, fn func(content []byte) (newContent []byte, err error)) error {
+	f, err := os.Open(filename)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	// NOTE(review): this second check is dead code — err was already
+	// handled above and cannot be non-nil here.
+	if err != nil {
+		return err
+	}
+	info, err := f.Stat()
+	if err != nil {
+		return err
+	}
+	cnt, err := ioutil.ReadAll(f)
+	if err != nil {
+		return err
+	}
+	newContent, err := fn(cnt)
+	if err != nil {
+		return err
+	}
+	return WriteFile(newfilename, newContent, info.Mode())
+}
diff --git a/vendor/github.com/henrylee2cn/goutil/gopath.go b/vendor/github.com/henrylee2cn/goutil/gopath.go
new file mode 100644
index 0000000..2da0cfd
--- /dev/null
+++ b/vendor/github.com/henrylee2cn/goutil/gopath.go
@@ -0,0 +1,47 @@
+package goutil
+
+import (
+ "errors"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
// GetFirstGopath gets the first $GOPATH value.
// If $GOPATH is unset and allowAutomaticGuessing is true, it guesses the
// GOPATH from the current working directory (the part up to and including
// the last "/src/" segment). The returned path ends with a path separator.
func GetFirstGopath(allowAutomaticGuessing bool) (goPath string, err error) {
	goPath = os.Getenv("GOPATH")
	// Normalize to the OS-specific separator on every return path,
	// including the error paths.
	defer func() {
		goPath = strings.Replace(goPath, "/", string(os.PathSeparator), -1)
	}()
	if len(goPath) == 0 {
		if !allowAutomaticGuessing {
			err = errors.New("not found GOPATH")
			return
		}
		// Guess: assume the working directory lies inside $GOPATH/src.
		p, _ := os.Getwd()
		p = strings.Replace(p, "\\", "/", -1) + "/"
		i := strings.LastIndex(p, "/src/")
		if i == -1 {
			err = errors.New("not found GOPATH")
			return
		}
		// Keep everything up to (and including) the slash before "src/".
		goPath = p[:i+1]
		return
	}
	// $GOPATH may hold an OS-specific list; keep only the first entry.
	var sep string
	if runtime.GOOS == "windows" {
		sep = ";"
	} else {
		sep = ":"
	}
	if goPaths := strings.Split(goPath, sep); len(goPaths) > 1 {
		goPath = goPaths[0]
	}
	// filepath.Abs never yields an empty string, so the index below is safe.
	goPath, _ = filepath.Abs(goPath)
	goPath = strings.Replace(goPath, "\\", "/", -1)
	if goPath[len(goPath)-1] != '/' {
		goPath += "/"
	}
	return
}
diff --git a/vendor/github.com/henrylee2cn/goutil/gotest.go b/vendor/github.com/henrylee2cn/goutil/gotest.go
new file mode 100644
index 0000000..d5b68ee
--- /dev/null
+++ b/vendor/github.com/henrylee2cn/goutil/gotest.go
@@ -0,0 +1,31 @@
+package goutil
+
+import (
+ "flag"
+ "os"
+ "strings"
+)
+
// IsGoTest reports whether the current process was started by `go test`.
func IsGoTest() bool {
	return isGoTest
}

// isGoTest is detected once at package initialization; the flag set and
// os.Args do not change afterwards.
var isGoTest bool

func init() {
	isGoTest = checkGoTestEnv()
}

// checkGoTestEnv heuristically detects the `go test` environment: the
// testing flags must be registered and the binary must carry the ".test"
// suffix that `go test` gives compiled test binaries.
func checkGoTestEnv() bool {
	maybe := flag.Lookup("test.v") != nil ||
		flag.Lookup("test.run") != nil ||
		flag.Lookup("test.bench") != nil
	if !maybe {
		return false
	}
	if len(os.Args) == 0 {
		return false
	}
	// On Windows the compiled test binary is named "*.test.exe";
	// the original check matched only the Unix ".test" form.
	return strings.HasSuffix(os.Args[0], ".test") ||
		strings.HasSuffix(os.Args[0], ".test.exe")
}
diff --git a/vendor/github.com/henrylee2cn/goutil/js_url.go b/vendor/github.com/henrylee2cn/goutil/js_url.go
new file mode 100644
index 0000000..ad7bbe7
--- /dev/null
+++ b/vendor/github.com/henrylee2cn/goutil/js_url.go
@@ -0,0 +1,19 @@
+package goutil
+
+import (
+ "net/url"
+ "strings"
+)
+
// JsQueryEscape escapes the string following the javascript
// encodeURIComponent convention so it can be safely placed inside a URL
// query: spaces become "%20" rather than "+".
func JsQueryEscape(s string) string {
	escaped := url.QueryEscape(s)
	return strings.Replace(escaped, "+", "%20", -1)
}
+
// JsQueryUnescape does the inverse transformation of JsQueryEscape,
// converting %AB into the byte 0xAB. It returns an error if any % is not
// followed by two hexadecimal digits.
func JsQueryUnescape(s string) (string, error) {
	normalized := strings.Replace(s, "%20", "+", -1)
	return url.QueryUnescape(normalized)
}
diff --git a/vendor/github.com/henrylee2cn/goutil/map.go b/vendor/github.com/henrylee2cn/goutil/map.go
new file mode 100644
index 0000000..fc20f19
--- /dev/null
+++ b/vendor/github.com/henrylee2cn/goutil/map.go
@@ -0,0 +1,647 @@
+package goutil
+
+import (
+ "math/rand"
+ "sync"
+ "sync/atomic"
+ "time"
+ "unsafe"
+)
+
// Map is a concurrent map with loads, stores, and deletes.
// It is safe for multiple goroutines to call a Map's methods concurrently.
type Map interface {
	// Load returns the value stored in the map for a key, or nil if no
	// value is present.
	// The ok result indicates whether value was found in the map.
	Load(key interface{}) (value interface{}, ok bool)
	// Store sets the value for a key.
	Store(key, value interface{})
	// LoadOrStore returns the existing value for the key if present.
	// Otherwise, it stores and returns the given value.
	// The loaded result is true if the value was loaded, false if stored.
	LoadOrStore(key, value interface{}) (actual interface{}, loaded bool)
	// Range calls f sequentially for each key and value present in the map.
	// If f returns false, range stops the iteration.
	Range(f func(key, value interface{}) bool)
	// Random returns a key-value pair chosen at random.
	// If exist is false, the map is empty.
	Random() (key, value interface{}, exist bool)
	// Delete deletes the value for a key.
	Delete(key interface{})
	// Clear clears all current data in the map.
	Clear()
	// Len returns the length of the map.
	Len() int
}
+
func init() {
	// Seed math/rand so that Random() picks differ between process runs.
	rand.Seed(time.Now().UnixNano())
}
+
+// RwMap creates a new concurrent safe map with sync.RWMutex.
+// The normal Map is high-performance mapping under low concurrency conditions.
+func RwMap(capacity ...int) Map {
+ var cap int
+ if len(capacity) > 0 {
+ cap = capacity[0]
+ }
+ return &rwMap{
+ data: make(map[interface{}]interface{}, cap),
+ }
+}
+
// rwMap concurrent secure data storage,
// which is high-performance mapping under low concurrency conditions.
type rwMap struct {
	data map[interface{}]interface{} // guarded by rwmu
	rwmu sync.RWMutex
}
+
+// Load returns the value stored in the map for a key, or nil if no
+// value is present.
+// The ok result indicates whether value was found in the map.
+func (m *rwMap) Load(key interface{}) (value interface{}, ok bool) {
+ m.rwmu.RLock()
+ value, ok = m.data[key]
+ m.rwmu.RUnlock()
+ return value, ok
+}
+
+// Store sets the value for a key.
+func (m *rwMap) Store(key, value interface{}) {
+ m.rwmu.Lock()
+ m.data[key] = value
+ m.rwmu.Unlock()
+}
+
+// LoadOrStore returns the existing value for the key if present.
+// Otherwise, it stores and returns the given value.
+// The loaded result is true if the value was loaded, false if stored.
+func (m *rwMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
+ m.rwmu.Lock()
+ actual, loaded = m.data[key]
+ if !loaded {
+ m.data[key] = value
+ actual = value
+ }
+ m.rwmu.Unlock()
+ return actual, loaded
+}
+
+// Delete deletes the value for a key.
+func (m *rwMap) Delete(key interface{}) {
+ m.rwmu.Lock()
+ delete(m.data, key)
+ m.rwmu.Unlock()
+}
+
+// Range calls f sequentially for each key and value present in the map.
+// If f returns false, range stops the iteration.
+func (m *rwMap) Range(f func(key, value interface{}) bool) {
+ m.rwmu.RLock()
+ defer m.rwmu.RUnlock()
+ for k, v := range m.data {
+ if !f(k, v) {
+ break
+ }
+ }
+}
+
+// Clear clears all current data in the map.
+func (m *rwMap) Clear() {
+ m.rwmu.Lock()
+ for k := range m.data {
+ delete(m.data, k)
+ }
+ m.rwmu.Unlock()
+}
+
// Random returns a pair kv randomly.
// If exist is false, the map is empty.
func (m *rwMap) Random() (key, value interface{}, exist bool) {
	m.rwmu.RLock()
	defer m.rwmu.RUnlock()
	length := len(m.data)
	if length == 0 {
		return
	}
	// Walk to the i-th pair of the map iteration; combined with rand.Intn
	// this yields a uniformly random pair.
	i := rand.Intn(length)
	for key, value = range m.data {
		if i == 0 {
			exist = true
			return
		}
		i--
	}
	return
}
+
+// Len returns the length of the map.
+// Note: the count is accurate.
+func (m *rwMap) Len() int {
+ m.rwmu.RLock()
+ defer m.rwmu.RUnlock()
+ return len(m.data)
+}
+
+// AtomicMap creates a concurrent map with amortized-constant-time loads, stores, and deletes.
+// It is safe for multiple goroutines to call a atomicMap's methods concurrently.
+// From go v1.9 sync.Map.
+func AtomicMap() Map {
+ return new(atomicMap)
+}
+
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// atomicMap is a concurrent map with amortized-constant-time loads, stores, and deletes.
// It is safe for multiple goroutines to call a atomicMap's methods concurrently.
//
// It is optimized for use in concurrent loops with keys that are
// stable over time, and either few steady-state stores, or stores
// localized to one goroutine per key.
//
// For use cases that do not share these attributes, it will likely have
// comparable or worse performance and worse type safety than an ordinary
// map paired with a read-write mutex.
//
// The zero atomicMap is valid and empty.
//
// A atomicMap must not be copied after first use.
type atomicMap struct {
	mu sync.Mutex

	// read contains the portion of the map's contents that are safe for
	// concurrent access (with or without mu held).
	//
	// The read field itself is always safe to load, but must only be stored with
	// mu held.
	//
	// Entries stored in read may be updated concurrently without mu, but updating
	// a previously-expunged entry requires that the entry be copied to the dirty
	// map and unexpunged with mu held.
	read atomic.Value // readOnly

	// dirty contains the portion of the map's contents that require mu to be
	// held. To ensure that the dirty map can be promoted to the read map quickly,
	// it also includes all of the non-expunged entries in the read map.
	//
	// Expunged entries are not stored in the dirty map. An expunged entry in the
	// clean map must be unexpunged and added to the dirty map before a new value
	// can be stored to it.
	//
	// If the dirty map is nil, the next write to the map will initialize it by
	// making a shallow copy of the clean map, omitting stale entries.
	dirty map[interface{}]*entry

	// misses counts the number of loads since the read map was last updated that
	// needed to lock mu to determine whether the key was present.
	//
	// Once enough misses have occurred to cover the cost of copying the dirty
	// map, the dirty map will be promoted to the read map (in the unamended
	// state) and the next store to the map will make a new dirty copy.
	misses int

	// length tracks the number of live entries so Len() is O(1); it is
	// maintained with atomics and may be briefly stale under concurrency.
	// @added by henrylee2cn 2017/11/17
	length int32
}

// readOnly is an immutable struct stored atomically in the atomicMap.read field.
type readOnly struct {
	m       map[interface{}]*entry
	amended bool // true if the dirty map contains some key not in m.
}

// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = unsafe.Pointer(new(interface{}))

// An entry is a slot in the map corresponding to a particular key.
type entry struct {
	// p points to the interface{} value stored for the entry.
	//
	// If p == nil, the entry has been deleted and m.dirty == nil.
	//
	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
	// is missing from m.dirty.
	//
	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
	// != nil, in m.dirty[key].
	//
	// An entry can be deleted by atomic replacement with nil: when m.dirty is
	// next created, it will atomically replace nil with expunged and leave
	// m.dirty[key] unset.
	//
	// An entry's associated value can be updated by atomic replacement, provided
	// p != expunged. If p == expunged, an entry's associated value can be updated
	// only after first setting m.dirty[key] = e so that lookups using the dirty
	// map find the entry.
	p unsafe.Pointer // *interface{}
}

// newEntry returns an entry holding i.
func newEntry(i interface{}) *entry {
	return &entry{p: unsafe.Pointer(&i)}
}
+
// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
func (m *atomicMap) Load(key interface{}) (value interface{}, ok bool) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		// Avoid reporting a spurious miss if m.dirty got promoted while we were
		// blocked on m.mu. (If further loads of the same key will not miss, it's
		// not worth copying the dirty map for this key.)
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return nil, false
	}
	return e.load()
}

// load reads the entry's value; ok is false when the entry has been
// deleted (nil) or expunged.
func (e *entry) load() (value interface{}, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expunged {
		return nil, false
	}
	return *(*interface{})(p), true
}

// Store sets the value for a key.
func (m *atomicMap) Store(key, value interface{}) {
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		// Fast path: lock-free store into an existing entry. A result of 0
		// (expunged) falls through to the locked slow path below.
		switch e.tryStore(&value) {
		case 1:
			return
		case 2:
			// A deleted entry came back to life, so the live count grows.
			// @added by henrylee2cn 2017/11/17
			atomic.AddInt32(&m.length, 1)
			return
		}
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		switch e.tryStore(&value) {
		case 1:
			m.mu.Unlock()
			return
		case 2:
			// @added by henrylee2cn 2017/11/17
			atomic.AddInt32(&m.length, 1)
			m.mu.Unlock()
			return
		case 0:
			if e.unexpungeLocked() {
				// The entry was previously expunged, which implies that there is a
				// non-nil dirty map and this entry is not in it.
				m.dirty[key] = e
				// @added by henrylee2cn 2017/11/17
				atomic.AddInt32(&m.length, 1)
			}
			e.storeLocked(&value)
		}

	} else if e, ok := m.dirty[key]; ok {
		e.storeLocked(&value)
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
		atomic.AddInt32(&m.length, 1)
	}
	m.mu.Unlock()
}

// tryStore stores a value if the entry has not been expunged.
//
// If the entry is expunged, tryStore returns 0 and leaves the entry
// unchanged. It returns 1 when an existing value was replaced, and 2 when
// the value was stored into a deleted (nil) entry — callers use 2 to bump
// the length counter.
func (e *entry) tryStore(i *interface{}) int8 {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return 0
	}
	for {
		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
			if p == nil {
				return 2
			}
			return 1
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return 0
		}
	}
}

// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}

// storeLocked unconditionally stores a value to the entry.
//
// The entry must be known not to be expunged.
func (e *entry) storeLocked(i *interface{}) {
	atomic.StorePointer(&e.p, unsafe.Pointer(i))
}
+
// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *atomicMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
	// Avoid locking if it's a clean hit.
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		actual, loaded, ok := e.tryLoadOrStore(value)
		if ok {
			// A fresh store (not a load) adds a live entry.
			// @added by henrylee2cn 2017/11/17
			if !loaded {
				atomic.AddInt32(&m.length, 1)
			}
			return actual, loaded
		}
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			m.dirty[key] = e
		}
		actual, loaded, ok = e.tryLoadOrStore(value)
		// @added by henrylee2cn 2017/12/01
		if ok && !loaded {
			atomic.AddInt32(&m.length, 1)
		}
	} else if e, ok := m.dirty[key]; ok {
		actual, loaded, _ = e.tryLoadOrStore(value)
		m.missLocked()
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
		atomic.AddInt32(&m.length, 1)
		actual, loaded = value, false
	}
	m.mu.Unlock()

	return actual, loaded
}

// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return nil, false, false
	}
	if p != nil {
		return *(*interface{})(p), true, true
	}

	// Copy the interface after the first load to make this method more amenable
	// to escape analysis: if we hit the "load" path or the entry is expunged, we
	// shouldn't bother heap-allocating.
	ic := i
	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
			return i, false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return nil, false, false
		}
		if p != nil {
			return *(*interface{})(p), true, true
		}
	}
}

// Delete deletes the value for a key.
func (m *atomicMap) Delete(key interface{}) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			// Key lives only in the dirty map: remove it there directly.
			if _, ok = m.dirty[key]; ok {
				delete(m.dirty, key)
				atomic.AddInt32(&m.length, -1)
				m.mu.Unlock()
				return
			}
		}
		m.mu.Unlock()
	}
	// Only decrement when the entry actually held a value.
	if ok && e.delete() {
		atomic.AddInt32(&m.length, -1)
	}
}

// delete marks the entry as deleted; hadValue reports whether a live value
// was removed (false if it was already nil or expunged).
func (e *entry) delete() (hadValue bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == expunged {
			return false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return true
		}
	}
}
+
// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the atomicMap's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently, Range may reflect any mapping for that key
// from any point during the Range call.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *atomicMap) Range(f func(key, value interface{}) bool) {
	// We need to be able to iterate over all of the keys that were already
	// present at the start of the call to Range.
	// If read.amended is false, then read.m satisfies that property without
	// requiring us to hold m.mu for a long time.
	read, _ := m.read.Load().(readOnly)
	if read.amended {
		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
		// (assuming the caller does not break out early), so a call to Range
		// amortizes an entire copy of the map: we can promote the dirty copy
		// immediately!
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		if read.amended {
			read = readOnly{m: m.dirty}
			m.read.Store(read)
			m.dirty = nil
			m.misses = 0
		}
		m.mu.Unlock()
	}

	for k, e := range read.m {
		v, ok := e.load()
		if !ok {
			// Skip deleted/expunged entries.
			continue
		}
		if !f(k, v) {
			break
		}
	}
}

// Clear clears all current data in the map.
func (m *atomicMap) Clear() {
	// Promote the dirty map first (as Range does) so that a single pass over
	// read.m sees every live entry.
	read, _ := m.read.Load().(readOnly)
	if read.amended {
		// m.dirty contains keys not in read.m; promoting it makes the
		// subsequent deletion loop complete.
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		if read.amended {
			read = readOnly{m: m.dirty}
			m.read.Store(read)
			m.dirty = nil
			m.misses = 0
		}
		m.mu.Unlock()
	}

	for _, e := range read.m {
		_, ok := e.load()
		if !ok {
			continue
		}
		if e.delete() {
			atomic.AddInt32(&m.length, -1)
		}
	}
}

// missLocked records a slow-path miss and promotes the dirty map to the
// read map once the misses cover the cost of copying it.
func (m *atomicMap) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	m.read.Store(readOnly{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}

// dirtyLocked (re)builds the dirty map as a copy of the non-expunged
// read entries; m.mu must be held.
func (m *atomicMap) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read, _ := m.read.Load().(readOnly)
	m.dirty = make(map[interface{}]*entry, len(read.m))
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}

// tryExpungeLocked converts a deleted (nil) entry to the expunged marker so
// it can be omitted from the new dirty map; m.mu must be held.
func (e *entry) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == expunged
}

// Len returns the length of the map.
// Note:
//  the length may be inaccurate.
// @added by henrylee2cn 2017/11/17
func (m *atomicMap) Len() int {
	return int(atomic.LoadInt32(&m.length))
}

// Random returns a pair kv randomly.
// If exist=false, no kv data is exist.
// It retries until it either observes an empty map or lands on a live
// entry, so concurrent deletions cannot make it return a stale pair.
// @added by henrylee2cn 2017/08/10
func (m *atomicMap) Random() (key, value interface{}, exist bool) {
	var (
		length, i int
		read      readOnly
		e         *entry
	)
	for {
		read, _ = m.read.Load().(readOnly)
		if read.amended {
			// Promote the dirty map so read.m covers every key.
			m.mu.Lock()
			read, _ = m.read.Load().(readOnly)
			if read.amended {
				read = readOnly{m: m.dirty}
				m.read.Store(read)
				m.dirty = nil
				m.misses = 0
			}
			m.mu.Unlock()
		}
		length = m.Len()
		if length <= 0 {
			return nil, nil, false
		}
		i = rand.Intn(length)
		for key, e = range read.m {
			value, exist = e.load()
			if !exist {
				continue
			}
			if i > 0 {
				i--
				continue
			}
			return
		}
	}
}
diff --git a/vendor/github.com/henrylee2cn/goutil/other.go b/vendor/github.com/henrylee2cn/goutil/other.go
new file mode 100644
index 0000000..393c268
--- /dev/null
+++ b/vendor/github.com/henrylee2cn/goutil/other.go
@@ -0,0 +1,67 @@
+package goutil
+
+import (
+ "reflect"
+ "unsafe"
+)
+
// AddrInt returns a pointer to an int holding the same value as i.
// Because i is a copy, the result never aliases the caller's variable.
func AddrInt(i int) *int {
	p := new(int)
	*p = i
	return p
}
+
// InitAndGetString if strPtr is empty string, initialize it with def,
// and return the final value. A nil pointer yields def without storing it.
func InitAndGetString(strPtr *string, def string) string {
	switch {
	case strPtr == nil:
		return def
	case *strPtr == "":
		*strPtr = def
	}
	return *strPtr
}
+
+// DereferenceType dereference, get the underlying non-pointer type.
+func DereferenceType(t reflect.Type) reflect.Type {
+ for t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ return t
+}
+
+// DereferenceValue dereference and unpack interface,
+// get the underlying non-pointer and non-interface value.
+func DereferenceValue(v reflect.Value) reflect.Value {
+ for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface {
+ v = v.Elem()
+ }
+ return v
+}
+
+func DereferencePtrValue(v reflect.Value) reflect.Value {
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ return v
+}
+
+func DereferenceIfaceValue(v reflect.Value) reflect.Value {
+ for v.Kind() == reflect.Interface {
+ v = v.Elem()
+ }
+ return v
+}
+
// DereferenceImplementType returns the underlying non-pointer type of the
// dynamic (implementation) value stored in v.
func DereferenceImplementType(v reflect.Value) reflect.Type {
	return DereferenceType(DereferenceIfaceValue(v).Type())
}
+
+// IsLittleEndian determine whether the current system is little endian.
+func IsLittleEndian() bool {
+ var i int32 = 0x01020304
+ u := unsafe.Pointer(&i)
+ pb := (*byte)(u)
+ b := *pb
+ return (b == 0x04)
+}
diff --git a/vendor/github.com/henrylee2cn/goutil/pid_file.go b/vendor/github.com/henrylee2cn/goutil/pid_file.go
new file mode 100644
index 0000000..11e4bd1
--- /dev/null
+++ b/vendor/github.com/henrylee2cn/goutil/pid_file.go
@@ -0,0 +1,31 @@
+package goutil
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+)
+
+// DEFAULT_PID_FILE the default PID file name
+var DEFAULT_PID_FILE = "log/PID"
+
+// WritePidFile writes the current PID to the specified file.
+func WritePidFile(pidFile ...string) {
+ fname := DEFAULT_PID_FILE
+ if len(pidFile) > 0 {
+ fname = pidFile[0]
+ }
+ abs, err := filepath.Abs(fname)
+ if err != nil {
+ panic(err)
+ }
+ dir := filepath.Dir(abs)
+ os.MkdirAll(dir, 0777)
+ pid := os.Getpid()
+ f, err := os.OpenFile(abs, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0666)
+ if err != nil {
+ panic(err)
+ }
+ defer f.Close()
+ f.WriteString(fmt.Sprintf("%d\n", pid))
+}
diff --git a/vendor/github.com/henrylee2cn/goutil/random.go b/vendor/github.com/henrylee2cn/goutil/random.go
new file mode 100644
index 0000000..08a5019
--- /dev/null
+++ b/vendor/github.com/henrylee2cn/goutil/random.go
@@ -0,0 +1,78 @@
+package goutil
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/base64"
+ mrand "math/rand"
+)
+
// NewRandom creates a new padded Encoding defined by the given alphabet string.
// The alphabet is normalized to exactly 64 characters for base64 use: any
// characters beyond 64 become the substitute pool, while a short alphabet
// is padded with NUL bytes that RandomString later replaces with random
// characters drawn from the substitute pool.
func NewRandom(alphabet string) *Random {
	r := new(Random)
	diff := 64 - len(alphabet)
	if diff < 0 {
		// Longer than 64: the tail becomes the substitute pool.
		r.substitute = []byte(alphabet[64:])
		r.substituteLen = len(r.substitute)
		alphabet = alphabet[:64]
	} else {
		// At most 64: the whole alphabet doubles as the substitute pool.
		r.substitute = []byte(alphabet)
		r.substituteLen = len(r.substitute)
		if diff > 0 {
			// Pad to 64 with NUL placeholders.
			alphabet += string(bytes.Repeat([]byte{0x00}, diff))
		}
	}
	r.encoding = base64.NewEncoding(alphabet).WithPadding(base64.NoPadding)
	return r
}
+
// Random is a random string creator.
type Random struct {
	encoding      *base64.Encoding // normalized 64-character alphabet, no padding
	substitute    []byte           // pool used to replace NUL placeholders
	substituteLen int
}
+
// RandomString returns a base64 encoded securely generated
// random string. It will panic if the system's secure random number generator
// fails to function correctly.
// The length n must be an integer multiple of 4, otherwise the last character will be padded with `=`.
func (r *Random) RandomString(n int) string {
	d := r.encoding.DecodedLen(n)
	buf := make([]byte, n)
	r.encoding.Encode(buf, RandomBytes(d))
	// NUL bytes come from the padding of a short alphabet (see NewRandom);
	// replace each with a random character from the substitute pool.
	for k, v := range buf {
		if v == 0x00 {
			buf[k] = r.substitute[mrand.Intn(r.substituteLen)]
		}
	}
	return BytesToString(buf)
}
+
// urlEncoder is the standard URL-safe base64 alphabet.
const urlEncoder = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"

// urlRandom is the shared generator backing URLRandomString, built on
// base64.URLEncoding.
var urlRandom = &Random{
	encoding:      base64.URLEncoding,
	substitute:    []byte(urlEncoder),
	substituteLen: len(urlEncoder),
}
+
// URLRandomString returns a URL-safe, base64 encoded securely generated
// random string. It will panic if the system's secure random number generator
// fails to function correctly.
// The length n must be an integer multiple of 4, otherwise the last character will be padded with `=`.
func URLRandomString(n int) string {
	return urlRandom.RandomString(n)
}
+
// RandomBytes returns securely generated random bytes. It will panic
// if the system's secure random number generator fails to function correctly.
func RandomBytes(n int) []byte {
	buf := make([]byte, n)
	// rand.Read returns a non-nil error only when fewer than n bytes
	// could be read.
	if _, err := rand.Read(buf); err != nil {
		panic(err)
	}
	return buf
}
diff --git a/vendor/github.com/henrylee2cn/goutil/sets.go b/vendor/github.com/henrylee2cn/goutil/sets.go
new file mode 100644
index 0000000..393e4b4
--- /dev/null
+++ b/vendor/github.com/henrylee2cn/goutil/sets.go
@@ -0,0 +1,462 @@
+package goutil
+
+import "strconv"
+
// StringsToBools converts string slice to bool slice.
// It stops at, and returns, the first parse error encountered.
func StringsToBools(a []string) ([]bool, error) {
	out := make([]bool, len(a))
	for idx, s := range a {
		b, err := strconv.ParseBool(s)
		if err != nil {
			return nil, err
		}
		out[idx] = b
	}
	return out, nil
}
+
// StringsToFloat32s converts string slice to float32 slice.
// It stops at, and returns, the first parse error encountered.
func StringsToFloat32s(a []string) ([]float32, error) {
	r := make([]float32, len(a))
	for k, v := range a {
		// bitSize 32 guarantees the parsed value fits in a float32.
		i, err := strconv.ParseFloat(v, 32)
		if err != nil {
			return nil, err
		}
		r[k] = float32(i)
	}
	return r, nil
}

// StringsToFloat64s converts string slice to float64 slice.
// It stops at, and returns, the first parse error encountered.
func StringsToFloat64s(a []string) ([]float64, error) {
	r := make([]float64, len(a))
	for k, v := range a {
		i, err := strconv.ParseFloat(v, 64)
		if err != nil {
			return nil, err
		}
		r[k] = i
	}
	return r, nil
}
+
// StringsToInts converts string slice to int slice.
// It stops at, and returns, the first parse error encountered.
func StringsToInts(a []string) ([]int, error) {
	out := make([]int, len(a))
	for idx, s := range a {
		n, err := strconv.Atoi(s)
		if err != nil {
			return nil, err
		}
		out[idx] = n
	}
	return out, nil
}
+
// StringsToInt64s converts string slice to int64 slice.
// It stops at, and returns, the first parse error encountered.
func StringsToInt64s(a []string) ([]int64, error) {
	r := make([]int64, len(a))
	for k, v := range a {
		i, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			return nil, err
		}
		r[k] = i
	}
	return r, nil
}

// StringsToInt32s converts string slice to int32 slice.
// It stops at, and returns, the first parse error encountered.
func StringsToInt32s(a []string) ([]int32, error) {
	r := make([]int32, len(a))
	for k, v := range a {
		// bitSize 32 rejects values outside the int32 range.
		i, err := strconv.ParseInt(v, 10, 32)
		if err != nil {
			return nil, err
		}
		r[k] = int32(i)
	}
	return r, nil
}

// StringsToInt16s converts string slice to int16 slice.
// It stops at, and returns, the first parse error encountered.
func StringsToInt16s(a []string) ([]int16, error) {
	r := make([]int16, len(a))
	for k, v := range a {
		// bitSize 16 rejects values outside the int16 range.
		i, err := strconv.ParseInt(v, 10, 16)
		if err != nil {
			return nil, err
		}
		r[k] = int16(i)
	}
	return r, nil
}

// StringsToInt8s converts string slice to int8 slice.
// It stops at, and returns, the first parse error encountered.
func StringsToInt8s(a []string) ([]int8, error) {
	r := make([]int8, len(a))
	for k, v := range a {
		// bitSize 8 rejects values outside the int8 range.
		i, err := strconv.ParseInt(v, 10, 8)
		if err != nil {
			return nil, err
		}
		r[k] = int8(i)
	}
	return r, nil
}
+
// StringsToUint8s converts string slice to uint8 slice.
// It stops at, and returns, the first parse error encountered.
func StringsToUint8s(a []string) ([]uint8, error) {
	r := make([]uint8, len(a))
	for k, v := range a {
		// bitSize 8 rejects values outside the uint8 range.
		i, err := strconv.ParseUint(v, 10, 8)
		if err != nil {
			return nil, err
		}
		r[k] = uint8(i)
	}
	return r, nil
}

// StringsToUint16s converts string slice to uint16 slice.
// It stops at, and returns, the first parse error encountered.
func StringsToUint16s(a []string) ([]uint16, error) {
	r := make([]uint16, len(a))
	for k, v := range a {
		// bitSize 16 rejects values outside the uint16 range.
		i, err := strconv.ParseUint(v, 10, 16)
		if err != nil {
			return nil, err
		}
		r[k] = uint16(i)
	}
	return r, nil
}

// StringsToUint32s converts string slice to uint32 slice.
// It stops at, and returns, the first parse error encountered.
func StringsToUint32s(a []string) ([]uint32, error) {
	r := make([]uint32, len(a))
	for k, v := range a {
		// bitSize 32 rejects values outside the uint32 range.
		i, err := strconv.ParseUint(v, 10, 32)
		if err != nil {
			return nil, err
		}
		r[k] = uint32(i)
	}
	return r, nil
}

// StringsToUint64s converts string slice to uint64 slice.
// It stops at, and returns, the first parse error encountered.
func StringsToUint64s(a []string) ([]uint64, error) {
	r := make([]uint64, len(a))
	for k, v := range a {
		i, err := strconv.ParseUint(v, 10, 64)
		if err != nil {
			return nil, err
		}
		r[k] = uint64(i)
	}
	return r, nil
}

// StringsToUints converts string slice to uint slice.
// It stops at, and returns, the first parse error encountered.
func StringsToUints(a []string) ([]uint, error) {
	r := make([]uint, len(a))
	for k, v := range a {
		i, err := strconv.ParseUint(v, 10, 64)
		if err != nil {
			return nil, err
		}
		r[k] = uint(i)
	}
	return r, nil
}
+
// StringsConvert converts the string slice to a new slice using fn.
// If fn returns error, exit the conversion and return the error.
func StringsConvert(a []string, fn func(string) (string, error)) ([]string, error) {
	out := make([]string, len(a))
	for idx := range a {
		converted, err := fn(a[idx])
		if err != nil {
			return nil, err
		}
		out[idx] = converted
	}
	return out, nil
}
+
// StringsConvertMap converts the string slice to a new map using fn,
// keyed by the original strings.
// If fn returns error, exit the conversion and return the error.
func StringsConvertMap(a []string, fn func(string) (string, error)) (map[string]string, error) {
	out := make(map[string]string, len(a))
	for idx := range a {
		converted, err := fn(a[idx])
		if err != nil {
			return nil, err
		}
		out[a[idx]] = converted
	}
	return out, nil
}
+
// IntersectStrings calculate intersection of two sets.
// Duplicates are honored with multiset semantics: each element of the
// shorter slice can match at most once.
func IntersectStrings(set1, set2 []string) []string {
	var intersect []string
	var long, short = set1, set2
	if len(set1) < len(set2) {
		long, short = set2, set1
	}

	// Work on a copy of the shorter slice, because matched elements are
	// removed from it in place below.
	buf := make([]string, len(short))
	copy(buf, short)
	short = buf

	for _, m := range long {
		if len(short) == 0 {
			break
		}
		for j, n := range short {
			if m == n {
				intersect = append(intersect, n)
				// Remove the matched element so it is not counted twice.
				short = short[:j+copy(short[j:], short[j+1:])]
				break
			}
		}
	}
	return intersect
}
+
// StringsDistinct creates a string set that
// removes the same elements and returns them in their original order.
func StringsDistinct(a []string) (set []string) {
	seen := make(map[string]bool, len(a))
	set = make([]string, 0, len(a))
	for _, s := range a {
		if !seen[s] {
			seen[s] = true
			set = append(set, s)
		}
	}
	return set
}
+
// SetToStrings adds a to the string set unless it is already present.
func SetToStrings(set []string, a string) []string {
	for _, member := range set {
		if member == a {
			return set
		}
	}
	set = append(set, a)
	return set
}
+
// RemoveFromStrings removes the first occurrence of a from the string set.
// NOTE: the returned slice shares (and mutates) the backing array of set.
func RemoveFromStrings(set []string, a string) []string {
	for idx := range set {
		if set[idx] == a {
			return append(set[:idx], set[idx+1:]...)
		}
	}
	return set
}
+
// RemoveAllFromStrings removes all the a element from the string set.
// The filter runs in place in a single O(n) pass (the previous version
// repeatedly invoked RemoveFromStrings, which was O(n^2)); the backing
// array is reused exactly as before.
func RemoveAllFromStrings(set []string, a string) []string {
	kept := set[:0]
	for _, s := range set {
		if s != a {
			kept = append(kept, s)
		}
	}
	return kept
}
+
// IntsDistinct creates a int set that
// removes the same elements and returns them in their original order.
func IntsDistinct(a []int) []int {
	seen := make(map[int]struct{}, len(a))
	set := make([]int, 0, len(a))
	for _, v := range a {
		if _, dup := seen[v]; dup {
			continue
		}
		seen[v] = struct{}{}
		set = append(set, v)
	}
	return set
}
+
// SetToInts sets a element to the int set.
// The element is appended only when it is not already present.
func SetToInts(set []int, a int) []int {
	for i := 0; i < len(set); i++ {
		if set[i] == a {
			return set
		}
	}
	return append(set, a)
}
+
// RemoveFromInts removes the first element from the int set.
// The removal is done in place: the tail shifts left one position and
// the backing array is reused.
func RemoveFromInts(set []int, a int) []int {
	for i := range set {
		if set[i] == a {
			copy(set[i:], set[i+1:])
			return set[:len(set)-1]
		}
	}
	return set
}
+
// RemoveAllFromInts removes all the a element from the int set.
// The filter runs in place in a single O(n) pass (the previous version
// repeatedly invoked RemoveFromInts, which was O(n^2)); the backing
// array is reused exactly as before.
func RemoveAllFromInts(set []int, a int) []int {
	kept := set[:0]
	for _, v := range set {
		if v != a {
			kept = append(kept, v)
		}
	}
	return kept
}
+
// Int32sDistinct creates a int32 set that
// removes the same elements and returns them in their original order.
func Int32sDistinct(a []int32) []int32 {
	seen := make(map[int32]struct{}, len(a))
	set := make([]int32, 0, len(a))
	for _, v := range a {
		if _, dup := seen[v]; dup {
			continue
		}
		seen[v] = struct{}{}
		set = append(set, v)
	}
	return set
}
+
// SetToInt32s sets a element to the int32 set.
// The element is appended only when it is not already present.
func SetToInt32s(set []int32, a int32) []int32 {
	for i := 0; i < len(set); i++ {
		if set[i] == a {
			return set
		}
	}
	return append(set, a)
}
+
// RemoveFromInt32s removes the first element from the int32 set.
// The removal is done in place: the tail shifts left one position and
// the backing array is reused.
func RemoveFromInt32s(set []int32, a int32) []int32 {
	for i := range set {
		if set[i] == a {
			copy(set[i:], set[i+1:])
			return set[:len(set)-1]
		}
	}
	return set
}
+
// RemoveAllFromInt32s removes all the a element from the int32 set.
// The filter runs in place in a single O(n) pass (the previous version
// repeatedly invoked RemoveFromInt32s, which was O(n^2)); the backing
// array is reused exactly as before.
func RemoveAllFromInt32s(set []int32, a int32) []int32 {
	kept := set[:0]
	for _, v := range set {
		if v != a {
			kept = append(kept, v)
		}
	}
	return kept
}
+
// Int64sDistinct creates a int64 set that
// removes the same elements and returns them in their original order.
func Int64sDistinct(a []int64) []int64 {
	seen := make(map[int64]struct{}, len(a))
	set := make([]int64, 0, len(a))
	for _, v := range a {
		if _, dup := seen[v]; dup {
			continue
		}
		seen[v] = struct{}{}
		set = append(set, v)
	}
	return set
}
+
// SetToInt64s sets a element to the int64 set.
// The element is appended only when it is not already present.
func SetToInt64s(set []int64, a int64) []int64 {
	for i := 0; i < len(set); i++ {
		if set[i] == a {
			return set
		}
	}
	return append(set, a)
}
+
// RemoveFromInt64s removes the first element from the int64 set.
// The removal is done in place: the tail shifts left one position and
// the backing array is reused.
func RemoveFromInt64s(set []int64, a int64) []int64 {
	for i := range set {
		if set[i] == a {
			copy(set[i:], set[i+1:])
			return set[:len(set)-1]
		}
	}
	return set
}
+
// RemoveAllFromInt64s removes all the a element from the int64 set.
// The filter runs in place in a single O(n) pass (the previous version
// repeatedly invoked RemoveFromInt64s, which was O(n^2)); the backing
// array is reused exactly as before.
func RemoveAllFromInt64s(set []int64, a int64) []int64 {
	kept := set[:0]
	for _, v := range set {
		if v != a {
			kept = append(kept, v)
		}
	}
	return kept
}
+
// InterfacesDistinct creates a interface{} set that
// removes the same elements and returns them in their original order.
// NOTE: elements must be comparable; an uncomparable value panics,
// as with any interface{} map key.
func InterfacesDistinct(a []interface{}) []interface{} {
	seen := make(map[interface{}]struct{}, len(a))
	set := make([]interface{}, 0, len(a))
	for _, v := range a {
		if _, dup := seen[v]; dup {
			continue
		}
		seen[v] = struct{}{}
		set = append(set, v)
	}
	return set
}
+
// SetToInterfaces sets a element to the interface{} set.
// The element is appended only when it is not already present.
func SetToInterfaces(set []interface{}, a interface{}) []interface{} {
	for i := 0; i < len(set); i++ {
		if set[i] == a {
			return set
		}
	}
	return append(set, a)
}
+
// RemoveFromInterfaces removes the first element from the interface{} set.
// The removal is done in place: the tail shifts left one position and
// the backing array is reused.
func RemoveFromInterfaces(set []interface{}, a interface{}) []interface{} {
	for i := range set {
		if set[i] == a {
			copy(set[i:], set[i+1:])
			return set[:len(set)-1]
		}
	}
	return set
}
+
// RemoveAllFromInterfaces removes all the a element from the interface{} set.
// The filter runs in place in a single O(n) pass (the previous version
// repeatedly invoked RemoveFromInterfaces, which was O(n^2)); the
// backing array is reused exactly as before.
func RemoveAllFromInterfaces(set []interface{}, a interface{}) []interface{} {
	kept := set[:0]
	for _, v := range set {
		if v != a {
			kept = append(kept, v)
		}
	}
	return kept
}
diff --git a/vendor/github.com/henrylee2cn/goutil/string.go b/vendor/github.com/henrylee2cn/goutil/string.go
new file mode 100644
index 0000000..6ff6a9a
--- /dev/null
+++ b/vendor/github.com/henrylee2cn/goutil/string.go
@@ -0,0 +1,383 @@
+package goutil
+
+import (
+ "bytes"
+ "strings"
+ "unicode/utf8"
+ "unsafe"
+)
+
// BytesToString convert []byte type to string type.
// Zero-copy: the returned string shares b's backing array via an
// unsafe header reinterpretation, so the caller must not mutate b
// after the call (strings are assumed immutable by the runtime).
func BytesToString(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}
+
// StringToBytes convert string type to []byte type.
// NOTE: panic if modify the member value of the []byte.
// Zero-copy: reinterprets the string header {data, len} as a slice
// header {data, len, cap} with cap set equal to len. String data may
// live in read-only memory, hence writing through the result can fault.
func StringToBytes(s string) []byte {
	sp := *(*[2]uintptr)(unsafe.Pointer(&s))
	bp := [3]uintptr{sp[0], sp[1], sp[1]}
	return *(*[]byte)(unsafe.Pointer(&bp))
}
+
// SnakeString converts the accepted string to a snake string (XxYy to xx_yy).
// An underscore is inserted before an upper-case letter whenever a word
// is already in progress; the whole result is then lower-cased.
func SnakeString(s string) string {
	out := make([]byte, 0, len(s)*2)
	wordStarted := false
	for i := 0; i < len(s); i++ {
		c := s[i]
		switch {
		case c >= 'A' && c <= 'Z':
			if wordStarted {
				out = append(out, '_')
				wordStarted = false
			}
		case c != '_':
			wordStarted = true
		}
		out = append(out, c)
	}
	return strings.ToLower(string(out))
}
+
// CamelString converts the accepted string to a camel string (xx_yy to XxYy)
func CamelString(s string) string {
	data := make([]byte, 0, len(s))
	j := false // j: the next lower-case letter must be up-cased (a word-separating '_' was just skipped)
	k := false // k: a word has already started (some upper-case letter was seen or produced)
	num := len(s) - 1
	for i := 0; i <= num; i++ {
		d := s[i]
		// An original upper-case letter marks the word as started.
		if k == false && d >= 'A' && d <= 'Z' {
			k = true
		}
		// Up-case a lower-case letter at the very start of the string
		// or immediately after a skipped '_'. ('a'-32 == 'A' in ASCII.)
		if d >= 'a' && d <= 'z' && (j || k == false) {
			d = d - 32
			j = false
			k = true
		}
		// Skip a '_' that separates two words (next char is lower-case)
		// and remember to up-case that next letter.
		if k && d == '_' && num > i && s[i+1] >= 'a' && s[i+1] <= 'z' {
			j = true
			continue
		}
		data = append(data, d)
	}
	return string(data[:])
}
+
// spaceReplacer collapses one doubled whitespace sequence per pass;
// SpaceInOne applies it repeatedly until the string stops changing, so
// any run of these characters shrinks to a single occurrence.
// NOTE(review): string(0x85) and string(0xA0) are the multi-byte UTF-8
// encodings of the runes U+0085 (NEL) and U+00A0 (NBSP), not the raw
// single bytes 0x85/0xA0 — confirm that is the intended behavior.
var spaceReplacer = strings.NewReplacer(
	"  ", " ",
	"\n\n", "\n",
	"\r\r", "\r",
	"\t\t", "\t",
	"\r\n\r\n", "\r\n",
	" \n", "\n",
	"\t\n", "\n",
	" \t", "\t",
	"\t ", "\t",
	"\v\v", "\v",
	"\f\f", "\f",
	string(0x85)+string(0x85),
	string(0x85),
	string(0xA0)+string(0xA0),
	string(0xA0),
)
+
+// SpaceInOne combines multiple consecutive space characters into one.
+func SpaceInOne(s string) string {
+ var old string
+ for old != s {
+ old = s
+ s = spaceReplacer.Replace(s)
+ }
+ return s
+}
+
// StringMarshalJSON converts the string to JSON byte stream.
// It emits s as a double-quoted JSON string: control characters, '"',
// '\\' and invalid UTF-8 (as \ufffd) are escaped, and U+2028/U+2029 are
// always escaped for JavaScript/JSONP safety. When escapeHTML is true,
// '<', '>' and '&' are escaped as well. The logic mirrors the string
// encoder in the standard encoding/json package.
func StringMarshalJSON(s string, escapeHTML bool) []byte {
	a := StringToBytes(s)
	var buf = bytes.NewBuffer(make([]byte, 0, 64))
	buf.WriteByte('"')
	// start marks the beginning of the pending run of bytes that need
	// no escaping; the run is flushed in one Write before each escape.
	start := 0
	for i := 0; i < len(a); {
		if b := a[i]; b < utf8.RuneSelf {
			// Fast path: ASCII byte that is safe in the current mode.
			if htmlSafeSet[b] || (!escapeHTML && safeSet[b]) {
				i++
				continue
			}
			if start < i {
				buf.Write(a[start:i])
			}
			switch b {
			case '\\', '"':
				buf.WriteByte('\\')
				buf.WriteByte(b)
			case '\n':
				buf.WriteByte('\\')
				buf.WriteByte('n')
			case '\r':
				buf.WriteByte('\\')
				buf.WriteByte('r')
			case '\t':
				buf.WriteByte('\\')
				buf.WriteByte('t')
			default:
				// This encodes bytes < 0x20 except for \t, \n and \r.
				// If escapeHTML is set, it also escapes <, >, and &
				// because they can lead to security holes when
				// user-controlled strings are rendered into JSON
				// and served to some browsers.
				buf.WriteString(`\u00`)
				buf.WriteByte(hexSet[b>>4])
				buf.WriteByte(hexSet[b&0xF])
			}
			i++
			start = i
			continue
		}
		c, size := utf8.DecodeRune(a[i:])
		if c == utf8.RuneError && size == 1 {
			// Invalid UTF-8 byte: emit the replacement character escape.
			if start < i {
				buf.Write(a[start:i])
			}
			buf.WriteString(`\ufffd`)
			i += size
			start = i
			continue
		}
		// U+2028 is LINE SEPARATOR.
		// U+2029 is PARAGRAPH SEPARATOR.
		// They are both technically valid characters in JSON strings,
		// but don't work in JSONP, which has to be evaluated as JavaScript,
		// and can lead to security holes there. It is valid JSON to
		// escape them, so we do so unconditionally.
		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
		if c == '\u2028' || c == '\u2029' {
			if start < i {
				buf.Write(a[start:i])
			}
			buf.WriteString(`\u202`)
			buf.WriteByte(hexSet[c&0xF])
			i += size
			start = i
			continue
		}
		i += size
	}
	// Flush the trailing unescaped run and close the quote.
	if start < len(a) {
		buf.Write(a[start:])
	}
	buf.WriteByte('"')
	return buf.Bytes()
}
+
+var hexSet = "0123456789abcdef"
+
// safeSet holds the value true if the ASCII character with the given array
// position can be represented inside a JSON string without any further
// escaping.
//
// All values are true except for the ASCII control characters (0-31), the
// double quote ("), and the backslash character ("\").
// The table is built programmatically rather than spelled out as a
// 98-entry composite literal; the resulting values are identical.
var safeSet = func() (set [utf8.RuneSelf]bool) {
	for c := 0x20; c < utf8.RuneSelf; c++ {
		set[c] = true
	}
	set['"'] = false
	set['\\'] = false
	return set
}()
+
+// htmlSafeSet holds the value true if the ASCII character with the given
+// array position can be safely represented inside a JSON string, embedded
+// inside of HTML " that closes the next token. If
+ // non-empty, the subsequent call to Next will return a raw or RCDATA text
+ // token: one that treats "
" as text instead of an element.
+ // rawTag's contents are lower-cased.
+ rawTag string
+ // textIsRaw is whether the current text token's data is not escaped.
+ textIsRaw bool
+ // convertNUL is whether NUL bytes in the current token's data should
+ // be converted into \ufffd replacement characters.
+ convertNUL bool
+ // allowCDATA is whether CDATA sections are allowed in the current context.
+ allowCDATA bool
+}
+
+// AllowCDATA sets whether or not the tokenizer recognizes as
+// the text "foo". The default value is false, which means to recognize it as
+// a bogus comment "" instead.
+//
+// Strictly speaking, an HTML5 compliant tokenizer should allow CDATA if and
+// only if tokenizing foreign content, such as MathML and SVG. However,
+// tracking foreign-contentness is difficult to do purely in the tokenizer,
+// as opposed to the parser, due to HTML integration points: an